| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__UpperCAmelCase ):
"""simple docstring"""
__A : int = ['''onnx''']
def __init__( self , *lowercase , **lowercase) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['onnx'])
@classmethod
def __lowercase ( cls , *lowercase , **lowercase) -> str:
'''simple docstring'''
requires_backends(cls , ['onnx'])
@classmethod
def __lowercase ( cls , *lowercase , **lowercase) -> Dict:
'''simple docstring'''
requires_backends(cls , ['onnx'])
| code_codestyle: 302
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowercase : Any = logging.get_logger(__name__)
lowercase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowercase : List[Any] = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
lowercase : Union[str, Any] = {
"""roberta-base""": 5_1_2,
"""roberta-large""": 5_1_2,
"""roberta-large-mnli""": 5_1_2,
"""distilroberta-base""": 5_1_2,
"""roberta-base-openai-detector""": 5_1_2,
"""roberta-large-openai-detector""": 5_1_2,
}
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : List[Any] = VOCAB_FILES_NAMES
__A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : List[str] = ['''input_ids''', '''attention_mask''']
__A : Tuple = RobertaTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ) -> int:
'''simple docstring'''
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
a__ : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , lowercase) != add_prefix_space:
a__ : Dict = getattr(lowercase , pre_tok_state.pop('type'))
a__ : Optional[int] = add_prefix_space
a__ : Optional[int] = pre_tok_class(**lowercase)
a__ : List[Any] = add_prefix_space
a__ : Dict = 'post_processor'
a__ : Union[str, Any] = getattr(self.backend_tokenizer , lowercase , lowercase)
if tokenizer_component_instance:
a__ : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
a__ : List[str] = tuple(state['sep'])
if "cls" in state:
a__ : Any = tuple(state['cls'])
a__ : Union[str, Any] = False
if state.get('add_prefix_space' , lowercase) != add_prefix_space:
a__ : int = add_prefix_space
a__ : Dict = True
if state.get('trim_offsets' , lowercase) != trim_offsets:
a__ : List[str] = trim_offsets
a__ : List[str] = True
if changes_to_apply:
a__ : Any = getattr(lowercase , state.pop('type'))
a__ : str = component_class(**lowercase)
setattr(self.backend_tokenizer , lowercase , lowercase)
@property
def __lowercase ( self) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def __lowercase ( self , lowercase) -> Dict:
'''simple docstring'''
a__ : Tuple = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else value
a__ : List[str] = value
def __lowercase ( self , *lowercase , **lowercase) -> BatchEncoding:
'''simple docstring'''
a__ : Any = kwargs.get('is_split_into_words' , lowercase)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowercase , **lowercase)
def __lowercase ( self , *lowercase , **lowercase) -> BatchEncoding:
'''simple docstring'''
a__ : Dict = kwargs.get('is_split_into_words' , lowercase)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowercase , **lowercase)
def __lowercase ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
a__ : int = self._tokenizer.model.save(lowercase , name=lowercase)
return tuple(lowercase)
def __lowercase ( self , lowercase , lowercase=None) -> Optional[Any]:
'''simple docstring'''
a__ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowercase ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__ : Union[str, Any] = [self.sep_token_id]
a__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| style_context_codestyle: 302
| label: 1
|
"""simple docstring"""
import torch
from transformers import AutoModel
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , __a : List[Any]="sayef/fsner-bert-base-uncased" ) -> List[Any]:
super(__a , self ).__init__()
_UpperCamelCase : Union[str, Any] = AutoModel.from_pretrained(__a , return_dict=__a )
_UpperCamelCase : Optional[Any] = torch.nn.CosineSimilarity(3 , 1e-0_8 )
_UpperCamelCase : Optional[Any] = torch.nn.Softmax(dim=1 )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , **__a : Optional[int] ) -> Optional[int]:
return self.bert(**__a ).last_hidden_state
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Any ) -> Union[str, Any]:
return token_embeddings.sum(2 , keepdim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[Any] , __a : List[Any] , __a : int=1 ) -> str:
return self.softmax(T * self.cos(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Optional[int] , __a : Dict ) -> Optional[int]:
_UpperCamelCase : str = W_supports["sizes"].tolist()
_UpperCamelCase : Union[str, Any] = W_supports["start_token_id"].item()
_UpperCamelCase : Tuple = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_UpperCamelCase : List[str] = self.BERT(**__a )
_UpperCamelCase : int = self.BERT(**__a )
_UpperCamelCase : Dict = None
_UpperCamelCase : int = None
_UpperCamelCase : Tuple = W_supports["input_ids"] == start_token_id
_UpperCamelCase : Optional[Any] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(__a ):
if i == 0:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Tuple = support_sizes[i - 1]
_UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]]
_UpperCamelCase : str = S[s : s + size][end_token_masks[s : s + size]]
_UpperCamelCase : Optional[int] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_UpperCamelCase : str = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_UpperCamelCase : Any = torch.vstack((p_starts, p_start) )
_UpperCamelCase : Union[str, Any] = torch.vstack((p_ends, p_end) )
else:
_UpperCamelCase : List[str] = p_start
_UpperCamelCase : Dict = p_end
return p_starts, p_ends
| code_codestyle: 51
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ["model.decoder.embed_positions.weights"]
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
_UpperCamelCase : List[str] = name.replace("emb" ,"model.decoder.embed_tokens" )
if "transformer" in name:
_UpperCamelCase : Optional[int] = name.replace("transformer" ,"model.decoder" )
if "cross_attention" in name:
_UpperCamelCase : Optional[int] = name.replace("cross_attention" ,"encoder_attn" )
if "linear1" in name:
_UpperCamelCase : Optional[Any] = name.replace("linear1" ,"fc1" )
if "linear2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("linear2" ,"fc2" )
if "norm1" in name:
_UpperCamelCase : Optional[Any] = name.replace("norm1" ,"self_attn_layer_norm" )
if "norm_cross" in name:
_UpperCamelCase : Dict = name.replace("norm_cross" ,"encoder_attn_layer_norm" )
if "norm2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("norm2" ,"final_layer_norm" )
if "out_norm" in name:
_UpperCamelCase : Union[str, Any] = name.replace("out_norm" ,"model.decoder.layer_norm" )
if "linears" in name:
_UpperCamelCase : List[str] = name.replace("linears" ,"lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCamelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" )
return name
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple[Dict, Dict]:
"""simple docstring"""
_UpperCamelCase : str = list(state_dict.keys() )
_UpperCamelCase : Optional[Any] = {}
for key in keys:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : List[Any] = rename_keys(lowercase_ )
if "in_proj_weight" in key:
# split fused qkv proj
_UpperCamelCase : Tuple = val[:hidden_size, :]
_UpperCamelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_UpperCamelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCamelCase : Optional[Any] = val
else:
_UpperCamelCase : List[str] = val
return state_dict, enc_dec_proj_state_dict
def lowercase__ ( lowercase_ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
_UpperCamelCase : List[Any] = 1_024
_UpperCamelCase : List[str] = 24
_UpperCamelCase : Any = 16
elif checkpoint == "medium":
_UpperCamelCase : Tuple = 1_536
_UpperCamelCase : Dict = 48
_UpperCamelCase : Tuple = 24
elif checkpoint == "large":
_UpperCamelCase : int = 2_048
_UpperCamelCase : Optional[int] = 48
_UpperCamelCase : Dict = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
_UpperCamelCase : str = MusicgenDecoderConfig(
hidden_size=lowercase_ ,ffn_dim=hidden_size * 4 ,num_hidden_layers=lowercase_ ,num_attention_heads=lowercase_ ,)
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_="cpu" ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = MusicGen.get_pretrained(lowercase_ ,device=lowercase_ )
_UpperCamelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ )
_UpperCamelCase : Optional[int] = fairseq_model.lm.state_dict()
_UpperCamelCase, _UpperCamelCase : Optional[Any] = rename_state_dict(
lowercase_ ,hidden_size=decoder_config.hidden_size )
_UpperCamelCase : Tuple = TaEncoderModel.from_pretrained("t5-base" )
_UpperCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_UpperCamelCase : str = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCamelCase, _UpperCamelCase : str = decoder.load_state_dict(lowercase_ ,strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowercase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
_UpperCamelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ ,audio_encoder=lowercase_ ,decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
_UpperCamelCase : List[str] = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
_UpperCamelCase : Dict = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
_UpperCamelCase : Tuple = model(input_ids=lowercase_ ,decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_UpperCamelCase : int = AutoTokenizer.from_pretrained("t5-base" )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" )
_UpperCamelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
_UpperCamelCase : str = 2_048
_UpperCamelCase : str = 2_048
# set other default generation config params
_UpperCamelCase : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
_UpperCamelCase : List[str] = True
_UpperCamelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| style_context_codestyle: 51
| label: 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class __SCREAMING_SNAKE_CASE ( a_ ):
'''simple docstring'''
lowerCAmelCase__ : Dict = "luke"
def __init__( self : Tuple ,UpperCamelCase : Optional[Any]=5_0267 ,UpperCamelCase : Optional[Any]=50_0000 ,UpperCamelCase : str=768 ,UpperCamelCase : Tuple=256 ,UpperCamelCase : Dict=12 ,UpperCamelCase : Tuple=12 ,UpperCamelCase : Optional[Any]=3072 ,UpperCamelCase : Optional[Any]="gelu" ,UpperCamelCase : List[str]=0.1 ,UpperCamelCase : Any=0.1 ,UpperCamelCase : int=512 ,UpperCamelCase : int=2 ,UpperCamelCase : List[Any]=0.0_2 ,UpperCamelCase : List[Any]=1e-12 ,UpperCamelCase : List[Any]=True ,UpperCamelCase : Any=None ,UpperCamelCase : Tuple=1 ,UpperCamelCase : int=0 ,UpperCamelCase : int=2 ,**UpperCamelCase : int ,) -> Union[str, Any]:
super().__init__(pad_token_id=UpperCamelCase ,bos_token_id=UpperCamelCase ,eos_token_id=UpperCamelCase ,**UpperCamelCase )
_lowercase : str = vocab_size
_lowercase : List[Any] = entity_vocab_size
_lowercase : Union[str, Any] = hidden_size
_lowercase : Tuple = entity_emb_size
_lowercase : Dict = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : int = hidden_act
_lowercase : Optional[int] = intermediate_size
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Any = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : List[Any] = type_vocab_size
_lowercase : int = initializer_range
_lowercase : Any = layer_norm_eps
_lowercase : str = use_entity_aware_attention
_lowercase : Optional[int] = classifier_dropout
| code_codestyle: 125
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = 42
_UpperCAmelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = 42
_UpperCAmelCase = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| style_context_codestyle: 328
| label: 0
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase : str = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowerCAmelCase : Tuple = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
lowerCAmelCase : Optional[Any] = {"""facebook/blenderbot-3B""": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
__SCREAMING_SNAKE_CASE: Optional[int] = bs[:]
__SCREAMING_SNAKE_CASE: Optional[Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCamelCase__ )
cs.append(2**8 + n )
n += 1
__SCREAMING_SNAKE_CASE: str = [chr(UpperCamelCase__ ) for n in cs]
return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCAmelCase ( UpperCamelCase__ : int ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[int] = set()
__SCREAMING_SNAKE_CASE: Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__SCREAMING_SNAKE_CASE: Union[str, Any] = char
return pairs
class a ( __lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : str = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="replace" , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=False , **_lowerCAmelCase , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Union[str, Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else bos_token
__SCREAMING_SNAKE_CASE: Union[str, Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else eos_token
__SCREAMING_SNAKE_CASE: Dict = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else sep_token
__SCREAMING_SNAKE_CASE: List[str] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else cls_token
__SCREAMING_SNAKE_CASE: str = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else unk_token
__SCREAMING_SNAKE_CASE: Optional[Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__SCREAMING_SNAKE_CASE: int = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
with open(_lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
__SCREAMING_SNAKE_CASE: Optional[int] = json.load(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE: Dict = errors # how to handle errors in decoding
__SCREAMING_SNAKE_CASE: int = bytes_to_unicode()
__SCREAMING_SNAKE_CASE: Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(_lowerCAmelCase , encoding='''utf-8''' ) as merges_handle:
__SCREAMING_SNAKE_CASE: List[str] = merges_handle.read().split('''\n''' )[1:-1]
__SCREAMING_SNAKE_CASE: Any = [tuple(merge.split() ) for merge in bpe_merges]
__SCREAMING_SNAKE_CASE: Optional[Any] = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__SCREAMING_SNAKE_CASE: str = {}
__SCREAMING_SNAKE_CASE: List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__SCREAMING_SNAKE_CASE: Optional[int] = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case_ ( self ):
"""simple docstring"""
return len(self.encoder )
def snake_case_ ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__SCREAMING_SNAKE_CASE: Union[str, Any] = tuple(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[int] = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
__SCREAMING_SNAKE_CASE: Tuple = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Optional[Any] = bigram
__SCREAMING_SNAKE_CASE: Dict = []
__SCREAMING_SNAKE_CASE: Dict = 0
while i < len(_lowerCAmelCase ):
try:
__SCREAMING_SNAKE_CASE: Optional[int] = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__SCREAMING_SNAKE_CASE: List[Any] = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__SCREAMING_SNAKE_CASE: Optional[int] = tuple(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
__SCREAMING_SNAKE_CASE: str = get_pairs(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[int] = ''' '''.join(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[str] = word
return word
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Tuple = []
for token in re.findall(self.pat , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Dict = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCAmelCase ).split(''' ''' ) )
return bpe_tokens
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
return self.decoder.get(_lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = ''''''.join(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(_lowerCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__SCREAMING_SNAKE_CASE: Tuple = os.path.join(
_lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__SCREAMING_SNAKE_CASE: Tuple = os.path.join(
_lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + '''\n''' )
__SCREAMING_SNAKE_CASE: List[str] = 0
with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
__SCREAMING_SNAKE_CASE: Optional[int] = token_index
writer.write(''' '''.join(_lowerCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Any = [self.sep_token_id]
__SCREAMING_SNAKE_CASE: Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase=False , **_lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowerCAmelCase ) > 0 and not text[0].isspace()):
__SCREAMING_SNAKE_CASE: List[str] = ''' ''' + text
return (text, kwargs)
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Dict = ''' '''.join(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Dict = self.encode(_lowerCAmelCase )
if len(_lowerCAmelCase ) > self.model_max_length:
__SCREAMING_SNAKE_CASE: List[str] = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| code_codestyle: 146
|
from math import ceil
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Union[str, Any] = list(range(0 , UpperCamelCase__ ) )
__SCREAMING_SNAKE_CASE: Optional[Any] = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
__SCREAMING_SNAKE_CASE: List[Any] = []
for i in device_map_blocks:
if device_map_blocks.count(UpperCamelCase__ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(UpperCamelCase__ )
# Missing blocks
__SCREAMING_SNAKE_CASE: Any = [i for i in blocks if i not in device_map_blocks]
__SCREAMING_SNAKE_CASE: List[str] = [i for i in device_map_blocks if i not in blocks]
if len(UpperCamelCase__ ) != 0:
raise ValueError(
'''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
''' These attention blocks were specified more than once: ''' + str(UpperCamelCase__ ) )
if len(UpperCamelCase__ ) != 0:
raise ValueError(
'''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
'''blocks to a device on the device_map: ''' + str(UpperCamelCase__ ) )
if len(UpperCamelCase__ ) != 0:
raise ValueError(
'''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
+ str(UpperCamelCase__ ) )
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = list(range(UpperCamelCase__ ) )
__SCREAMING_SNAKE_CASE: Optional[int] = int(ceil(n_layers / len(UpperCamelCase__ ) ) )
__SCREAMING_SNAKE_CASE: str = [layers[i : i + n_blocks] for i in range(0 , UpperCamelCase__ , UpperCamelCase__ )]
return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) )
| style_context_codestyle: 146
| label: 1
|
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
UpperCamelCase = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
UpperCamelCase = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_0000):
out_file.write(data)
UpperCamelCase = BeautifulSoup(res.text, 'html.parser')
UpperCamelCase = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F"""https://google.com{link.get('href')}""")
| code_codestyle: 61
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| style_context_codestyle: 699
| label: 0
|
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
_a : List[Any] = True
except (ImportError, ModuleNotFoundError):
_a : Any = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def a__ ( a : str ):
"""simple docstring"""
re.sub("<n>" , "" , a ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(a ) )
| code_codestyle: 87
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_a : Tuple = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_a : int = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def a__ ( a : List[str] , a : int , a : int ):
"""simple docstring"""
_snake_case : Union[str, Any] = state_dict.pop(a )
_snake_case : Union[str, Any] = val
def a__ ( a : Tuple ):
"""simple docstring"""
_snake_case : Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_snake_case : Dict = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_snake_case : Tuple = value
else:
_snake_case : Dict = value
return new_state_dict
def a__ ( a : int ):
"""simple docstring"""
_snake_case : Any = ""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_snake_case : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
_snake_case : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_snake_case : int = in_proj_weight[:256, :]
_snake_case : List[str] = in_proj_bias[:256]
_snake_case : Optional[Any] = in_proj_weight[256:512, :]
_snake_case : List[str] = in_proj_bias[256:512]
_snake_case : Dict = in_proj_weight[-256:, :]
_snake_case : Dict = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_snake_case : List[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
_snake_case : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Union[str, Any] = in_proj_weight[:256, :]
_snake_case : Tuple = in_proj_bias[:256]
_snake_case : int = in_proj_weight[256:512, :]
_snake_case : int = in_proj_bias[256:512]
_snake_case : Dict = in_proj_weight[-256:, :]
_snake_case : str = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_snake_case : Dict = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
_snake_case : Optional[int] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_snake_case : Dict = in_proj_weight_cross_attn[:256, :]
_snake_case : Any = in_proj_bias_cross_attn[:256]
_snake_case : Union[str, Any] = in_proj_weight_cross_attn[256:512, :]
_snake_case : Optional[int] = in_proj_bias_cross_attn[256:512]
_snake_case : Any = in_proj_weight_cross_attn[-256:, :]
_snake_case : str = in_proj_bias_cross_attn[-256:]
def a__ ( a : str , a : int ):
"""simple docstring"""
_snake_case , _snake_case : List[str] = image.size
_snake_case : Dict = max(a , a )
_snake_case : Union[str, Any] = 800 if "detection" in checkpoint_url else 1_000
_snake_case : Any = target_max_size / current_max_size
_snake_case : str = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def a__ ( a : str ):
"""simple docstring"""
_snake_case : str = F.to_tensor(a )
_snake_case : Union[str, Any] = F.normalize(a , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def a__ ( a : Optional[Any] , a : Any , a : Union[str, Any] ):
"""simple docstring"""
logger.info("Converting model..." )
# load original state dict
_snake_case : Tuple = torch.hub.load_state_dict_from_url(a , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(a , a , a )
_snake_case : Union[str, Any] = rename_backbone_keys(a )
# query, key and value matrices need special treatment
read_in_q_k_v(a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_snake_case : int = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_snake_case : Optional[int] = state_dict.pop(a )
_snake_case : Any = val
# create HuggingFace model and load state dict
_snake_case : Tuple = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
_snake_case : Any = 15
_snake_case : int = 2
_snake_case : Optional[Any] = {0: "table", 1: "table rotated"}
_snake_case : Union[str, Any] = idalabel
_snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()}
else:
_snake_case : Any = 125
_snake_case : Union[str, Any] = 6
_snake_case : List[str] = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
_snake_case : Any = idalabel
_snake_case : Optional[int] = {v: k for k, v in idalabel.items()}
_snake_case : Union[str, Any] = DetrImageProcessor(
format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1_000 )
_snake_case : str = TableTransformerForObjectDetection(a )
model.load_state_dict(a )
model.eval()
# verify our conversion
_snake_case : Optional[int] = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
_snake_case : Optional[Any] = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=a )
_snake_case : Dict = Image.open(a ).convert("RGB" )
_snake_case : Union[str, Any] = normalize(resize(a , a ) ).unsqueeze(0 )
_snake_case : str = model(a )
if "detection" in checkpoint_url:
_snake_case : int = (1, 15, 3)
_snake_case : List[str] = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
_snake_case : List[str] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
_snake_case : Union[str, Any] = (1, 125, 7)
_snake_case : str = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
_snake_case : Optional[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(a ).mkdir(exist_ok=a )
model.save_pretrained(a )
image_processor.save_pretrained(a )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
_snake_case : int = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(a )
image_processor.push_to_hub(a )
if __name__ == "__main__":
_a : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_a : Any = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| style_context_codestyle: 87
| label: 1
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : List[str] = TypeVar("T")
class lowerCAmelCase__ ( Generic[T] ):
a__ : deque[T] # Cache store of keys
a__ : set[T] # References of the keys in cache
a__ : int = 10 # Maximum capacity of cache
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ) -> None:
__lowerCamelCase = deque()
__lowerCamelCase = set()
if not n:
__lowerCamelCase = sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
__lowerCamelCase = n
def __A ( self : str , SCREAMING_SNAKE_CASE__ : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
__lowerCamelCase = self.dq_store.pop()
self.key_reference.remove(SCREAMING_SNAKE_CASE__ )
else:
self.dq_store.remove(SCREAMING_SNAKE_CASE__ )
self.dq_store.appendleft(SCREAMING_SNAKE_CASE__ )
self.key_reference.add(SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] ) -> None:
for k in self.dq_store:
print(SCREAMING_SNAKE_CASE__ )
def __repr__( self : List[str] ) -> str:
return f'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : LRUCache[str | int] = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| code_codestyle: 298
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : Dict = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=7 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : List[str]=18 , SCREAMING_SNAKE_CASE__ : Optional[int]=30 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4_00 , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : List[str]=None , ) -> Dict:
__lowerCamelCase = size if size is not None else {'''height''': 20, '''width''': 20}
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
__lowerCamelCase = size
__lowerCamelCase = do_normalize
__lowerCamelCase = do_convert_rgb
__lowerCamelCase = [5_12, 10_24, 20_48, 40_96]
__lowerCamelCase = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def __A ( self : Union[str, Any] ) -> Optional[int]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __A ( self : int ) -> Dict:
__lowerCamelCase = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
__lowerCamelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Optional[Any] = PixaStructImageProcessor if is_vision_available() else None
def __A ( self : Any ) -> Tuple:
__lowerCamelCase = PixaStructImageProcessingTester(self )
@property
def __A ( self : Any ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : List[str] ) -> Tuple:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : Optional[Any] ) -> List[str]:
__lowerCamelCase = self.image_processor_tester.prepare_dummy_image()
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
__lowerCamelCase = 20_48
__lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __A ( self : Optional[int] ) -> Union[str, Any]:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Any ) -> Dict:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
__lowerCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
__lowerCamelCase = '''Hello'''
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : int ) -> Union[str, Any]:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Any ) -> int:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Optional[int] = PixaStructImageProcessor if is_vision_available() else None
def __A ( self : List[str] ) -> Optional[Any]:
__lowerCamelCase = PixaStructImageProcessingTester(self , num_channels=4 )
__lowerCamelCase = 3
@property
def __A ( self : List[Any] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Optional[int] ) -> Any:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : Optional[int] ) -> Any:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under fnc between x_start and x_end with the trapezoidal rule."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2
    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
    while i <= 100_000:
        print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
        i *= 10
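    # Illustrative sanity check (added here, not part of the original script): the exact
    # integral of x^3 + x^2 over [-5, 5] is 250/3, so a fine-grained estimate should agree.
    exact_area = 250 / 3
    estimate = trapezoidal_area(f, -5, 5, 100_000)
    print(f"exact area: {exact_area}, estimate with 100000 steps: {estimate}")
    assert abs(estimate - exact_area) < 1e-3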
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=2 , __A=32 , __A=16 , __A=3 , __A=True , __A=True , __A=32 , __A=4 , __A=[0, 1, 2, 3] , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=0.0_2 , __A=3 , __A=[1, 384, 24, 24] , __A=True , __A=None , ) -> Optional[Any]:
lowerCAmelCase_ :Optional[Any] = parent
lowerCAmelCase_ :Optional[int] = batch_size
lowerCAmelCase_ :Dict = image_size
lowerCAmelCase_ :Tuple = patch_size
lowerCAmelCase_ :Union[str, Any] = num_channels
lowerCAmelCase_ :Tuple = is_training
lowerCAmelCase_ :Dict = use_labels
lowerCAmelCase_ :Union[str, Any] = hidden_size
lowerCAmelCase_ :Union[str, Any] = num_hidden_layers
lowerCAmelCase_ :List[Any] = backbone_out_indices
lowerCAmelCase_ :Optional[int] = num_attention_heads
lowerCAmelCase_ :List[str] = intermediate_size
lowerCAmelCase_ :Any = hidden_act
lowerCAmelCase_ :str = hidden_dropout_prob
lowerCAmelCase_ :Tuple = attention_probs_dropout_prob
lowerCAmelCase_ :List[Any] = initializer_range
lowerCAmelCase_ :Optional[int] = num_labels
lowerCAmelCase_ :List[Any] = backbone_featmap_shape
lowerCAmelCase_ :List[Any] = scope
lowerCAmelCase_ :Tuple = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase_ :Optional[int] = (image_size // patch_size) ** 2
lowerCAmelCase_ :Optional[Any] = num_patches + 1
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ :List[str] = None
if self.use_labels:
lowerCAmelCase_ :Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase_ :List[str] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__A , backbone_featmap_shape=self.backbone_featmap_shape , )
def __lowerCAmelCase ( self , __A , __A , __A ) -> List[Any]:
lowerCAmelCase_ :Dict = DPTModel(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :List[str] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A ) -> List[str]:
lowerCAmelCase_ :Union[str, Any] = self.num_labels
lowerCAmelCase_ :Optional[Any] = DPTForDepthEstimation(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Dict = model(__A )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __lowerCAmelCase ( self , __A , __A , __A ) -> List[Any]:
lowerCAmelCase_ :int = self.num_labels
lowerCAmelCase_ :Optional[int] = DPTForSemanticSegmentation(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Union[str, Any] = model(__A , labels=__A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = config_and_inputs
lowerCAmelCase_ :str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :str = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCAmelCase_ :Union[str, Any] = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ :Optional[Any] = False
UpperCAmelCase_ :List[str] = False
UpperCAmelCase_ :Dict = False
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :List[str] = DPTModelTester(self )
lowerCAmelCase_ :List[str] = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __lowerCAmelCase ( self ) -> List[str]:
pass
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ :Dict = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase_ :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , nn.Linear ) )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ :Union[str, Any] = model_class(__A )
lowerCAmelCase_ :int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ :int = [*signature.parameters.keys()]
lowerCAmelCase_ :int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
def __lowerCAmelCase ( self ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowerCAmelCase_ , lowerCAmelCase_ :str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :Any = True
if model_class in get_values(__A ):
continue
lowerCAmelCase_ :Union[str, Any] = model_class(__A )
model.to(__A )
model.train()
lowerCAmelCase_ :Optional[int] = self._prepare_for_class(__A , __A , return_labels=__A )
lowerCAmelCase_ :Dict = model(**__A ).loss
loss.backward()
def __lowerCAmelCase ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowerCAmelCase_ , lowerCAmelCase_ :str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :Dict = False
lowerCAmelCase_ :Optional[Any] = True
if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing:
continue
lowerCAmelCase_ :Optional[Any] = model_class(__A )
model.to(__A )
model.gradient_checkpointing_enable()
model.train()
lowerCAmelCase_ :Any = self._prepare_for_class(__A , __A , return_labels=__A )
lowerCAmelCase_ :Dict = model(**__A ).loss
loss.backward()
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ , lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :int = _config_zero_init(__A )
for model_class in self.all_model_classes:
lowerCAmelCase_ :str = model_class(config=__A )
# Skip the check for the backbone
lowerCAmelCase_ :List[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
lowerCAmelCase_ :Tuple = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCAmelCase ( self ) -> List[str]:
pass
@slow
def __lowerCAmelCase ( self ) -> List[str]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
lowerCAmelCase_ :List[Any] = DPTModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __lowerCAmelCase ( self ) -> int:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :List[Any] = """add"""
with self.assertRaises(__A ):
lowerCAmelCase_ :Optional[Any] = DPTForDepthEstimation(__A )
def _snake_case ( ) -> int:
'''simple docstring'''
lowerCAmelCase_ :int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[str] = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
lowerCAmelCase_ :str = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__A )
lowerCAmelCase_ :Any = prepare_img()
lowerCAmelCase_ :str = image_processor(images=__A , return_tensors="""pt""" ).to(__A )
# forward pass
with torch.no_grad():
lowerCAmelCase_ :Union[str, Any] = model(**__A )
lowerCAmelCase_ :str = outputs.predicted_depth
# verify the predicted depth
lowerCAmelCase_ :Tuple = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __A )
lowerCAmelCase_ :int = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__A )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __A , atol=1E-4 ) )
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float):
    """Solve for whichever of stress, tangential force, or area is passed as 0,
    using the relation stress = tangential_force / area."""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
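    # Illustrative example (added, not part of the original file): with stress passed
    # as the unknown (0), the function returns stress = tangential_force / area.
    print(shear_stress(stress=0, tangential_force=100, area=20))  # ('stress', 5.0)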
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
__lowerCamelCase : Optional[Any] = AutoencoderKL
__lowerCamelCase : List[Any] = "sample"
__lowerCamelCase : Tuple = 1e-2
@property
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = 4
_lowerCAmelCase = 3
_lowerCAmelCase = (32, 32)
_lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase )
return {"sample": image}
@property
def _snake_case ( self ) -> Any:
return (3, 32, 32)
@property
def _snake_case ( self ) -> List[Any]:
return (3, 32, 32)
def _snake_case ( self ) -> str:
_lowerCAmelCase = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
_lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self ) -> Optional[int]:
pass
def _snake_case ( self ) -> Any:
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def _snake_case ( self ) -> str:
# enable deterministic behavior for gradient checkpointing
_lowerCAmelCase , _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common()
_lowerCAmelCase = self.model_class(**_lowerCAmelCase )
model.to(_lowerCAmelCase )
assert not model.is_gradient_checkpointing and model.training
_lowerCAmelCase = model(**_lowerCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_lowerCAmelCase = torch.randn_like(_lowerCAmelCase )
_lowerCAmelCase = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_lowerCAmelCase = self.model_class(**_lowerCAmelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_lowerCAmelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_lowerCAmelCase = model_a(**_lowerCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_lowerCAmelCase = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
_lowerCAmelCase = dict(model.named_parameters() )
_lowerCAmelCase = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(_lowerCAmelCase )
_lowerCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
_lowerCAmelCase = model.to(_lowerCAmelCase )
model.eval()
if torch_device == "mps":
_lowerCAmelCase = torch.manual_seed(0 )
else:
_lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
_lowerCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_lowerCAmelCase = image.to(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase , sample_posterior=_lowerCAmelCase , generator=_lowerCAmelCase ).sample
_lowerCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_lowerCAmelCase = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
_lowerCAmelCase = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
_lowerCAmelCase = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(_lowerCAmelCase , _lowerCAmelCase , rtol=1E-2 ) )
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCAmelCase ) for s in shape] )}.npy'''
def _snake_case ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _lowerCAmelCase=0 , _lowerCAmelCase=(4, 3, 512, 512) , _lowerCAmelCase=False ) -> Tuple:
_lowerCAmelCase = torch.floataa if fpaa else torch.floataa
_lowerCAmelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) ).to(_lowerCAmelCase ).to(_lowerCAmelCase )
return image
def _snake_case ( self , _lowerCAmelCase="CompVis/stable-diffusion-v1-4" , _lowerCAmelCase=False ) -> Tuple:
_lowerCAmelCase = "fp16" if fpaa else None
_lowerCAmelCase = torch.floataa if fpaa else torch.floataa
_lowerCAmelCase = AutoencoderKL.from_pretrained(
_lowerCAmelCase , subfolder="vae" , torch_dtype=_lowerCAmelCase , revision=_lowerCAmelCase , )
model.to(_lowerCAmelCase ).eval()
return model
def _snake_case ( self , _lowerCAmelCase=0 ) -> str:
if torch_device == "mps":
return torch.manual_seed(_lowerCAmelCase )
return torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
_lowerCAmelCase = self.get_generator(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_generator(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str:
_lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _snake_case ( self , _lowerCAmelCase ) -> List[str]:
_lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _snake_case ( self , _lowerCAmelCase ) -> Any:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
_lowerCAmelCase = self.get_generator(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model.encode(_lowerCAmelCase ).latent_dist
_lowerCAmelCase = dist.sample(generator=_lowerCAmelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_lowerCAmelCase = sample[0, -1, -3:, -3:].flatten().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
_lowerCAmelCase = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=_lowerCAmelCase )
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A dummy iterable dataset that yields an unbounded count of integers and stops
    # at a random point (or at max_length), so shard behaviour can be tested.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Each shard should contain as many batches as its expected list.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=__A )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__A , __A )
snake_case_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A , __A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
snake_case_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=__A )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__A , __A )
snake_case_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=__A )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
snake_case_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=__A )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__A , __A )
snake_case_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=__A )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
snake_case_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=__A )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__A , __A )
snake_case_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=__A )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=__A )
snake_case_ = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__A , __A )
snake_case_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=__A )
snake_case_ = [[], []]
self.check_batch_sampler_shards(__A , __A )
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__A )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size.
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__A )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__A )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__A )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__A )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__A )
snake_case_ = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__A )
snake_case_ = [[], []]
self.check_batch_sampler_shards(__A , __A , split_batches=__A )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=__A )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
snake_case_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
snake_case_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=__A )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
snake_case_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=__A )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
snake_case_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=__A )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
snake_case_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=__A )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
snake_case_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=__A )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
snake_case_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=__A )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=__A )
snake_case_ = [[[0, 1]], []]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
snake_case_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=__A )
snake_case_ = [[], []]
self.check_batch_sampler_shards(__A , __A , even_batches=__A )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__A )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size.
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__A )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__A )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__A )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__A )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__A )
snake_case_ = [[[0, 1]], []]
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__A )
snake_case_ = [[], []]
self.check_batch_sampler_shards(__A , __A , split_batches=__A , even_batches=__A )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
snake_case_ = [BatchSamplerShard(__A , 2 , __A , even_batches=__A ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def snake_case__ ( self : int , __lowercase : List[str] , __lowercase : int , __lowercase : List[str] , __lowercase : Optional[int]=False , __lowercase : List[str]=2 , __lowercase : Optional[Any]=False ):
"""simple docstring"""
random.seed(__A )
snake_case_ = list(__A )
snake_case_ = [
IterableDatasetShard(
__A , batch_size=__A , drop_last=__A , num_processes=__A , process_index=__A , split_batches=__A , )
for i in range(__A )
]
snake_case_ = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__A )
iterable_dataset_lists.append(list(__A ) )
snake_case_ = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
snake_case_ = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__A ) , len(__A ) )
self.assertTrue(len(__A ) % shard_batch_size == 0 )
snake_case_ = []
for idx in range(0 , len(__A ) , __A ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__A ) < len(__A ):
reference += reference
self.assertListEqual(__A , reference[: len(__A )] )
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = RandomIterableDataset()
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
# Edge case with a very small dataset
snake_case_ = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
self.check_iterable_dataset_shards(__A , __A , batch_size=4 , drop_last=__A , split_batches=__A )
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
snake_case_ = BatchSampler(range(16 ) , batch_size=4 , drop_last=__A )
snake_case_ = SkipBatchSampler(__A , 2 )
self.assertListEqual(list(__A ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ = DataLoader(list(range(16 ) ) , batch_size=4 )
snake_case_ = skip_first_batches(__A , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(__A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def snake_case__ ( self : Dict ):
"""simple docstring"""
Accelerator()
snake_case_ = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(__A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
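if __name__ == "__main__":
    # Illustrative sketch (added, not part of the original tests): two BatchSamplerShard
    # views over one BatchSampler interleave whole batches between the processes, matching
    # the patterns asserted above.
    sampler = BatchSampler(range(8), batch_size=2, drop_last=False)
    shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
    print([list(shard) for shard in shards])  # [[[0, 1], [4, 5]], [[2, 3], [6, 7]]]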
def equation(x: float) -> float:
    """The function whose root is searched for."""
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    """Find a root of `equation` inside the bracket [a, b] by repeated halving."""
    # The bracket must contain a sign change for bisection to work.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
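    # Illustrative check (added, not part of the original file): the positive root of
    # 10 - x^2 is sqrt(10) ~= 3.1623, and the final bracket is narrower than 0.01.
    import math
    assert abs(bisection(0, 6) - math.sqrt(10)) < 0.02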
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : Tuple = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A['''modeling_clap'''] = [
        '''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ClapModel''',
        '''ClapPreTrainedModel''',
        '''ClapTextModel''',
        '''ClapTextModelWithProjection''',
        '''ClapAudioModel''',
        '''ClapAudioModelWithProjection''',
    ]
    A['''feature_extraction_clap'''] = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], A, module_spec=__spec__)
import os
# Precomputes a list of the 100 first triangular numbers
A : List[Any] = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    """Count the words in words.txt whose letter-value sum is a triangular number."""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(letter) - 64 for letter in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
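    # Illustrative example (added, not part of the original solution): the word value of
    # "SKY" is 19 + 11 + 25 = 55, the 10th triangular number, so "SKY" is a triangle word.
    print(sum(ord(letter) - 64 for letter in "SKY") in TRIANGULAR_NUMBERS)  # True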
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
snake_case_ : List[str] = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders to test with."""
    def get_dataset(n_batches):
        # Noisy samples of the line y = a * x + b.
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))
    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs` and returns the random numbers drawn along the way."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model to do y = a * x + b."""
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))
    def forward(self, x):
        return x * self.a + self.b
class __lowerCamelCase ( unittest.TestCase ):
def A__ ( self ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase: int = DummyModel()
UpperCAmelCase: Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase: List[Any] = dummy_dataloaders()
UpperCAmelCase: List[str] = ProjectConfiguration(total_limit=1 , project_dir=__snake_case , automatic_checkpoint_naming=__snake_case )
# Train baseline
UpperCAmelCase: Optional[int] = Accelerator(project_config=__snake_case )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase: List[Any] = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase: List[Any] = DummyModel()
UpperCAmelCase: Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase: Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase: Optional[Any] = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase: str = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case )
# Save initial
UpperCAmelCase: Optional[Any] = os.path.join(__snake_case , "initial" )
accelerator.save_state(__snake_case )
((UpperCAmelCase) , (UpperCAmelCase)): Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase: str = optimizer.state_dict()
UpperCAmelCase: str = train(3 , __snake_case , __snake_case , __snake_case , __snake_case )
((UpperCAmelCase) , (UpperCAmelCase)): Dict = model.a.item(), model.b.item()
UpperCAmelCase: List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase: List[Any] = DummyModel()
UpperCAmelCase: Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase: Optional[Any] = dummy_dataloaders()
UpperCAmelCase: List[str] = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase: Dict = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case )
accelerator.load_state(__snake_case )
((UpperCAmelCase) , (UpperCAmelCase)): Dict = model.a.item(), model.b.item()
UpperCAmelCase: str = optimizer.state_dict()
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
UpperCAmelCase: Optional[int] = train(2 , __snake_case , __snake_case , __snake_case , __snake_case )
# Save everything
UpperCAmelCase: str = os.path.join(__snake_case , "checkpoint" )
accelerator.save_state(__snake_case )
# Load everything back in and make sure all states work
accelerator.load_state(__snake_case )
test_rands += train(1 , __snake_case , __snake_case , __snake_case , __snake_case )
((UpperCAmelCase) , (UpperCAmelCase)): Dict = model.a.item(), model.b.item()
UpperCAmelCase: Union[str, Any] = optimizer.state_dict()
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
def A__ ( self ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase: Optional[Any] = DummyModel()
UpperCAmelCase: Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase: List[Any] = dummy_dataloaders()
UpperCAmelCase: Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=__snake_case )
# Train baseline
UpperCAmelCase: Union[str, Any] = Accelerator(project_dir=__snake_case , project_config=__snake_case )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase: Optional[int] = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case )
# Save initial
accelerator.save_state()
((UpperCAmelCase) , (UpperCAmelCase)): Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase: Tuple = optimizer.state_dict()
UpperCAmelCase: Any = train(3 , __snake_case , __snake_case , __snake_case , __snake_case )
((UpperCAmelCase) , (UpperCAmelCase)): List[Any] = model.a.item(), model.b.item()
UpperCAmelCase: str = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase: Dict = DummyModel()
UpperCAmelCase: Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase: Optional[Any] = dummy_dataloaders()
UpperCAmelCase: List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=__snake_case )
UpperCAmelCase: Tuple = Accelerator(project_dir=__snake_case , project_config=__snake_case )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase: Dict = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case )
accelerator.load_state(os.path.join(__snake_case , "checkpoints" , "checkpoint_0" ) )
((UpperCAmelCase) , (UpperCAmelCase)): str = model.a.item(), model.b.item()
UpperCAmelCase: int = optimizer.state_dict()
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
UpperCAmelCase: Optional[Any] = train(2 , __snake_case , __snake_case , __snake_case , __snake_case )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__snake_case , "checkpoints" , "checkpoint_1" ) )
test_rands += train(1 , __snake_case , __snake_case , __snake_case , __snake_case )
((UpperCAmelCase) , (UpperCAmelCase)): Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase: str = optimizer.state_dict()
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase: Dict = torch.tensor([1, 2, 3] )
UpperCAmelCase: int = torch.tensor([2, 3, 4] )
UpperCAmelCase: Optional[int] = DummyModel()
UpperCAmelCase: Dict = torch.optim.Adam(net.parameters() )
UpperCAmelCase: Tuple = Accelerator()
with self.assertRaises(__snake_case ) as ve:
accelerator.register_for_checkpointing(__snake_case , __snake_case , __snake_case , __snake_case )
UpperCAmelCase: int = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def A__ ( self ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase: List[str] = DummyModel()
UpperCAmelCase: Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase: Union[str, Any] = torch.optim.lr_scheduler.StepLR(__snake_case , step_size=1 , gamma=0.99 )
UpperCAmelCase , UpperCAmelCase: Union[str, Any] = dummy_dataloaders()
UpperCAmelCase: int = ProjectConfiguration(automatic_checkpoint_naming=__snake_case )
# Train baseline
UpperCAmelCase: str = Accelerator(project_dir=__snake_case , project_config=__snake_case )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase: Dict = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Save initial
accelerator.save_state()
UpperCAmelCase: Optional[Any] = scheduler.state_dict()
train(3 , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
self.assertNotEqual(__snake_case , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__snake_case , "checkpoints" , "checkpoint_0" ) )
self.assertEqual(__snake_case , scheduler.state_dict() )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase: List[str] = DummyModel()
UpperCAmelCase: str = ProjectConfiguration(automatic_checkpoint_naming=__snake_case , total_limit=2 )
# Train baseline
UpperCAmelCase: Optional[Any] = Accelerator(project_dir=__snake_case , project_config=__snake_case )
UpperCAmelCase: Any = accelerator.prepare(__snake_case )
            # Save 11 states; with total_limit=2 only the two most recent checkpoints should remain:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__snake_case , "checkpoints" , "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "checkpoints" , "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "checkpoints" , "checkpoint_10" ) ) )
@require_cuda
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase: str = ["torchrun", F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(__snake_case , env=os.environ.copy() )
if __name__ == "__main__":
snake_case_ : int = '/tmp/accelerate/state_checkpointing'
snake_case_ : Union[str, Any] = DummyModel()
snake_case_ : str = torch.optim.Adam(params=model.parameters(), lr=1e-3)
snake_case_ : List[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
snake_case_ , snake_case_ : Dict = dummy_dataloaders()
snake_case_ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
snake_case_ : Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[str] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
snake_case_ , snake_case_ : List[str] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
snake_case_ : Tuple = group['params'][0].device
break
assert param_device.type == accelerator.device.type
snake_case_ : Tuple = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
snake_case_ : Dict = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
snake_case_ : Tuple = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
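# NOTE: The tests and the __main__ block above rely on three helpers -- DummyModel,
# dummy_dataloaders, and train -- that are defined earlier in the original test file and are
# not reproduced in this excerpt. Below is a minimal sketch of what such helpers could look
# like, consistent with how they are called here; the data shapes, the loss, and the exact
# return values are illustrative assumptions, not the actual Accelerate test fixtures. The
# names carry a "_sketch" suffix so they do not shadow the real definitions above.
import torch
from torch.utils.data import DataLoader, TensorDataset


class DummyModelSketch(torch.nn.Module):
    """Tiny model with two scalar parameters, a and b (assumed shapes)."""

    def __init__(self):
        super().__init__()
        self.a = torch.nn.Parameter(torch.randn(1))
        self.b = torch.nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


def dummy_dataloaders_sketch(n_samples=64, batch_size=16):
    """Return a (train, valid) pair of loaders over random y = 3x + noise data."""
    loaders = []
    for _ in range(2):
        x = torch.randn(n_samples, 1)
        y = 3 * x + 0.1 * torch.randn(n_samples, 1)
        loaders.append(DataLoader(TensorDataset(x, y), batch_size=batch_size))
    return loaders


def train_sketch(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Run a few epochs and return the per-step losses so separate runs can be compared."""
    losses = []
    for _ in range(num_epochs):
        for x, y in dataloader:
            optimizer.zero_grad()
            loss = torch.nn.functional.mse_loss(model(x), y)
            accelerator.backward(loss)
            optimizer.step()
            if scheduler is not None:
                scheduler.step()
            losses.append(loss.item())
    return losses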
| 166
|
# Imports
import numpy as np
class __lowerCamelCase :
def __init__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None ) -> Union[str, Any]:
"""simple docstring"""
self.set_matricies(red=__snake_case , green=__snake_case , blue=__snake_case , red_edge=__snake_case , nir=__snake_case )
def A__ ( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None ) -> Dict:
"""simple docstring"""
if red is not None:
UpperCAmelCase: Optional[Any] = red
if green is not None:
UpperCAmelCase: List[Any] = green
if blue is not None:
UpperCAmelCase: Any = blue
if red_edge is not None:
UpperCAmelCase: Optional[int] = red_edge
if nir is not None:
UpperCAmelCase: int = nir
return True
def A__ ( self , __snake_case="" , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None ) -> List[str]:
"""simple docstring"""
self.set_matricies(red=__snake_case , green=__snake_case , blue=__snake_case , red_edge=__snake_case , nir=__snake_case )
UpperCAmelCase: Dict = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
def A__ ( self ) -> Any:
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def A__ ( self ) -> List[str]:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def A__ ( self ) -> Dict:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def A__ ( self ) -> str:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def A__ ( self ) -> Dict:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def A__ ( self , __snake_case=0.08 , __snake_case=1.22 , __snake_case=0.03 ) -> Optional[int]:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def A__ ( self ) -> Dict:
"""simple docstring"""
return (self.nir / self.green) - 1
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def A__ ( self ) -> str:
"""simple docstring"""
return (self.red - self.blue) / self.red
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase: Union[str, Any] = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def A__ ( self ) -> List[str]:
"""simple docstring"""
return self.nir - self.green
def A__ ( self ) -> Dict:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase: str = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def A__ ( self , __snake_case=0.16 ) -> Tuple:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def A__ ( self , __snake_case=0.5 ) -> Tuple:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def A__ ( self ) -> int:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def A__ ( self , __snake_case=None , __snake_case=None ) -> int:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def A__ ( self ) -> Any:
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def A__ ( self ) -> str:
"""simple docstring"""
return self.nir / self.red
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def A__ ( self ) -> str:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def A__ ( self ) -> str:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def A__ ( self ) -> int:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def A__ ( self ) -> Dict:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase: int = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
UpperCAmelCase: Union[str, Any] = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def A__ ( self ) -> List[Any]:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.nir / self.red
def A__ ( self ) -> List[str]:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def A__ ( self ) -> Any:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
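# NOTE: The identifiers in the class above were mangled, so a direct usage example would not
# run as written. The sketch below instead reproduces the NDVI formula the class encodes,
# (NIR - Red) / (NIR + Red), on plain NumPy arrays; the band values are made-up reflectances.
red_band = np.array([[0.10, 0.20], [0.15, 0.30]])
nir_band = np.array([[0.60, 0.55], [0.70, 0.40]])
ndvi_map = (nir_band - red_band) / (nir_band + red_band)  # values fall in [-1, 1]
print(ndvi_map)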
| 166
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : int =XLNetTokenizer
__lowerCamelCase : str =XLNetTokenizerFast
__lowerCamelCase : Dict =True
__lowerCamelCase : List[str] =True
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a = XLNetTokenizer(__lowercase , keep_accents=__lowercase )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = """<s>"""
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(__lowercase ) , 1006 )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = XLNetTokenizer(__lowercase , keep_accents=__lowercase )
__a = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , [285, 46, 10, 170, 382] )
__a = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__a = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
__a = tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = XLNetTokenizer(__lowercase , do_lower_case=__lowercase )
__a = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = XLNetTokenizer(__lowercase , do_lower_case=__lowercase )
__a = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
__a = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
__a = tokenizer.encode("""sequence builders""" , add_special_tokens=__lowercase )
__a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__lowercase )
__a = tokenizer.build_inputs_with_special_tokens(__lowercase )
__a = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
# fmt: off
__a = {"""input_ids""": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 225
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Dict =GPTSanJapaneseTokenizer
__lowerCamelCase : List[Any] =False
__lowerCamelCase : List[str] ={'do_clean_text': False, 'add_prefix_space': False}
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
super().setUp()
# fmt: off
__a = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__a = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__a = {"""unk_token""": """<unk>"""}
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def UpperCamelCase_ ( self : Dict , **__lowercase : Union[str, Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCamelCase_ ( self : Any , __lowercase : str ):
'''simple docstring'''
__a = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
__a = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Union[str, Any] ):
'''simple docstring'''
__a , __a = self.get_input_output_texts(__lowercase )
__a = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__a = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = self.get_tokenizer()
# Testing tokenization
__a = """こんにちは、世界。 こんばんは、㔺界。"""
__a = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__a = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids without special tokens
__a = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__a = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids with special tokens
__a = tokens + [tokenizer.unk_token]
__a = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__a = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = self.get_tokenizer()
# Testing tokenization
__a = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__a = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__a = tokenizer.encode(__lowercase )
__a = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__a = """こんにちは、世界。"""
__a = """こんばんは、㔺界。😀"""
__a = """こんにちは、世界。こんばんは、世界。😀"""
__a = tokenizer.encode(prefix_text + input_text )
__a = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__a = tokenizer.encode(__lowercase , prefix_text=__lowercase )
__a = tokenizer.decode(__lowercase )
__a = tokenizer.decode(__lowercase )
__a = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__a = """こんにちは、世界。"""
__a = """こんばんは、㔺界。😀"""
__a = len(tokenizer.encode(__lowercase ) ) - 2
__a = len(tokenizer.encode(__lowercase ) ) - 2
__a = [1] + [0] * (len_prefix + len_text + 1)
__a = [1] * (len_prefix + len_text + 1) + [0]
__a = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__a = tokenizer(prefix_text + input_text ).token_type_ids
__a = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__a = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__a = tokenizer.encode("""あンいワ""" )
__a = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__a = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__a = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__a = tokenizer(__lowercase , padding=__lowercase )
__a = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
__a = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__a = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__a = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# tokenizer has no padding token
pass
| 225
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = MBartConfig
SCREAMING_SNAKE_CASE_ : List[Any] = {}
SCREAMING_SNAKE_CASE_ : str = """gelu"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=False , _lowercase=99 , _lowercase=32 , _lowercase=2 , _lowercase=4 , _lowercase=37 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=20 , _lowercase=2 , _lowercase=1 , _lowercase=0 , ) -> Optional[Any]:
lowercase_ : Union[str, Any] = parent
lowercase_ : List[str] = batch_size
lowercase_ : Union[str, Any] = seq_length
lowercase_ : Optional[Any] = is_training
lowercase_ : int = use_labels
lowercase_ : Dict = vocab_size
lowercase_ : Optional[Any] = hidden_size
lowercase_ : Optional[int] = num_hidden_layers
lowercase_ : Optional[Any] = num_attention_heads
lowercase_ : List[str] = intermediate_size
lowercase_ : int = hidden_dropout_prob
lowercase_ : int = attention_probs_dropout_prob
lowercase_ : Union[str, Any] = max_position_embeddings
lowercase_ : Tuple = eos_token_id
lowercase_ : int = pad_token_id
lowercase_ : List[str] = bos_token_id
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase_ : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase_ : Dict = tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : Dict = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase_ : int = prepare_mbart_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return config, inputs_dict
def lowerCamelCase__ ( self , _lowercase , _lowercase ) -> Any:
lowercase_ : Any = TFMBartModel(config=UpperCAmelCase_ ).get_decoder()
lowercase_ : Optional[int] = inputs_dict['input_ids']
lowercase_ : Dict = input_ids[:1, :]
lowercase_ : Union[str, Any] = inputs_dict['attention_mask'][:1, :]
lowercase_ : Optional[Any] = inputs_dict['head_mask']
lowercase_ : int = 1
# first forward pass
lowercase_ : Dict = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
lowercase_ , lowercase_ : Dict = outputs.to_tuple()
lowercase_ : List[str] = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in any attention / head masks that were not explicitly provided."""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : Dict = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : List[Any] = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Dict = TFMBartModelTester(self )
lowercase_ : List[str] = ConfigTester(self , config_class=UpperCAmelCase_ )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [
""" UN Chief Says There Is No Military Solution in Syria""",
]
SCREAMING_SNAKE_CASE_ : Any = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
]
SCREAMING_SNAKE_CASE_ : Optional[Any] = """facebook/mbart-large-en-ro"""
@cached_property
def lowerCamelCase__ ( self ) -> Optional[int]:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase__ ( self ) -> str:
lowercase_ : str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase__ ( self , **_lowercase ) -> Optional[Any]:
lowercase_ : Union[str, Any] = self.translate_src_text(**UpperCAmelCase_ )
self.assertListEqual(self.expected_text , UpperCAmelCase_ )
def lowerCamelCase__ ( self , **_lowercase ) -> List[Any]:
lowercase_ : Tuple = self.tokenizer(self.src_text , **UpperCAmelCase_ , return_tensors='tf' )
lowercase_ : Union[str, Any] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
lowercase_ : Dict = self.tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
return generated_words
@slow
def lowerCamelCase__ ( self ) -> List[Any]:
self._assert_generated_batch_equal_expected()
| 710
|
'''simple docstring'''
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with cocktail shaker sort and return it.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # Right-to-left pass: push the smallest remaining element towards the front.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # Left-to-right pass: push the largest remaining element towards the back.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 7
| 0
|
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file, sock):
    """Patch builtins.open and socket.socket, then check send_file's call sequence."""
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    read_chunks = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _size: next(read_chunks)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
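# NOTE: The function under test, send_file, lives in file_transfer/send_file.py and is not
# included in this excerpt. The sketch below is merely consistent with the assertions above
# (one socket, bind, listen, a single accept and recv, reads until a falsy chunk, then
# close/shutdown); the port, host, buffer size, and the meaning of `testing` are assumptions,
# and the name carries a "_sketch" suffix so it does not shadow the imported function.
import socket


def send_file_sketch(filename: str = "mytext.txt", testing: bool = False) -> None:
    port = 12312  # assumed port number
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("localhost", port))
    sock.listen(5)
    conn, _addr = sock.accept()
    _request = conn.recv(1024)  # the client signals it is ready to receive
    if not testing:
        print(f"Serving {filename}")
    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    sock.shutdown(1)
    sock.close()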
| 178
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
__snake_case = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 178
| 1
|
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
lowerCAmelCase : int = '''src/transformers'''
# Matches is_xxx_available()
lowerCAmelCase : int = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase : int = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase : Optional[int] = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
lowerCAmelCase : Dict = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase : Any = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase : str = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase : Tuple = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase : Any = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase : str = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
lowerCAmelCase : Optional[int] = re.compile(r'''^\s*try:''')
# Catches a line with else:
lowerCAmelCase : Dict = re.compile(r'''^\s*else:''')
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if _re_test_backend.search(lowerCamelCase ) is None:
return None
__lowerCAmelCase = [b[0] for b in _re_backend.findall(lowerCamelCase )]
backends.sort()
return "_and_".join(lowerCamelCase )
def __lowerCAmelCase ( lowerCamelCase : Any ):
'''simple docstring'''
with open(lowerCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
__lowerCAmelCase = f.readlines()
__lowerCAmelCase = 0
while line_index < len(lowerCamelCase ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
__lowerCAmelCase = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
__lowerCAmelCase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCamelCase ):
__lowerCAmelCase = _re_one_line_import_struct.search(lowerCamelCase ).groups()[0]
__lowerCAmelCase = re.findall(r"\[([^\]]+)\]" , lowerCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
__lowerCAmelCase = _re_import_struct_key_value.search(lowerCamelCase )
if single_line_import_search is not None:
__lowerCAmelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(lowerCamelCase ) > 0]
objects.extend(lowerCamelCase )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
__lowerCAmelCase = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowerCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowerCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowerCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
__lowerCAmelCase = lines[line_index]
if _re_import_struct_add_one.search(lowerCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(lowerCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(lowerCamelCase ) is not None:
__lowerCAmelCase = _re_import_struct_add_many.search(lowerCamelCase ).groups()[0].split(", " )
__lowerCAmelCase = [obj[1:-1] for obj in imports if len(lowerCamelCase ) > 0]
objects.extend(lowerCamelCase )
elif _re_between_brackets.search(lowerCamelCase ) is not None:
__lowerCAmelCase = _re_between_brackets.search(lowerCamelCase ).groups()[0].split(", " )
__lowerCAmelCase = [obj[1:-1] for obj in imports if len(lowerCamelCase ) > 0]
objects.extend(lowerCamelCase )
elif _re_quote_object.search(lowerCamelCase ) is not None:
objects.append(_re_quote_object.search(lowerCamelCase ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
__lowerCAmelCase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowerCAmelCase = []
while (
line_index < len(lowerCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
__lowerCAmelCase = lines[line_index]
__lowerCAmelCase = _re_import.search(lowerCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowerCAmelCase = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowerCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowerCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowerCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
__lowerCAmelCase = lines[line_index]
__lowerCAmelCase = _re_import.search(lowerCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowerCAmelCase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Dict ):
'''simple docstring'''
def find_duplicates(lowerCamelCase : Tuple ):
return [k for k, v in collections.Counter(lowerCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowerCAmelCase = []
for key in import_dict_objects.keys():
__lowerCAmelCase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
__lowerCAmelCase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowerCAmelCase = "base imports" if key == "none" else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = []
for root, _, files in os.walk(lowerCamelCase ):
if "__init__.py" in files:
__lowerCAmelCase = os.path.join(lowerCamelCase , "__init__.py" )
__lowerCAmelCase = parse_init(lowerCamelCase )
if objects is not None:
__lowerCAmelCase = analyze_results(*lowerCamelCase )
if len(lowerCamelCase ) > 0:
__lowerCAmelCase = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("\n".join(lowerCamelCase ) )
if len(lowerCamelCase ) > 0:
raise ValueError("\n\n".join(lowerCamelCase ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = []
for path, directories, files in os.walk(lowerCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(lowerCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowerCamelCase ) / folder).glob("*.py" ) ) ) == 0:
continue
__lowerCAmelCase = str((Path(lowerCamelCase ) / folder).relative_to(lowerCamelCase ) )
__lowerCAmelCase = short_path.replace(os.path.sep , "." )
submodules.append(lowerCamelCase )
for fname in files:
if fname == "__init__.py":
continue
__lowerCAmelCase = str((Path(lowerCamelCase ) / fname).relative_to(lowerCamelCase ) )
__lowerCAmelCase = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(lowerCamelCase )
return submodules
lowerCAmelCase : List[str] = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def __lowerCAmelCase ( ):
'''simple docstring'''
from transformers.utils import direct_transformers_import
__lowerCAmelCase = direct_transformers_import(lowerCamelCase )
__lowerCAmelCase = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-) add them.
with open(os.path.join(lowerCamelCase , "__init__.py" ) , "r" ) as f:
__lowerCAmelCase = f.read()
import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]" , lowerCamelCase ) ) )
__lowerCAmelCase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(lowerCamelCase ) > 0:
__lowerCAmelCase = "\n".join(f'''- {module}''' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registed in the main init of Transformers:\n"
f'''{list_of_modules}\n'''
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
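# NOTE: For reference, a hypothetical transformers-style __init__.py that the parser above can
# walk is sketched below (module and object names are placeholders). It is kept commented out
# because this layout belongs in a package __init__.py, not in this checking script.
#
#     from typing import TYPE_CHECKING
#     from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
#
#     _import_structure = {
#         "configuration_foo": ["FooConfig"],
#     }
#
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         _import_structure["modeling_foo"] = ["FooModel"]
#
#     if TYPE_CHECKING:
#         from .configuration_foo import FooConfig
#
#         try:
#             if not is_torch_available():
#                 raise OptionalDependencyNotAvailable()
#         except OptionalDependencyNotAvailable:
#             pass
#         else:
#             from .modeling_foo import FooModel
#     else:
#         import sys
#
#         sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)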
| 39
|
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCAmelCase : Optional[Any] = '''scheduler_config.json'''
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : str = 1
a : Optional[int] = 2
a : int = 3
a : Union[str, Any] = 4
a : int = 5
a : Optional[int] = 6
a : str = 7
a : List[Any] = 8
a : List[str] = 9
a : List[str] = 1_0
a : int = 1_1
a : Any = 1_2
a : Any = 1_3
a : Tuple = 1_4
@dataclass
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : torch.FloatTensor
class UpperCAmelCase__ :
a : Tuple = SCHEDULER_CONFIG_NAME
a : Union[str, Any] = []
a : str = True
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase=False , **UpperCamelCase , ) -> int:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , )
return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False , **UpperCamelCase ) -> Dict:
self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase )
@property
def UpperCAmelCase_ ( self ) -> str:
return self._get_compatibles()
@classmethod
def UpperCAmelCase_ ( cls ) -> Tuple:
__lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
__lowerCAmelCase = importlib.import_module(__name__.split("." )[0] )
__lowerCAmelCase = [
getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase )
]
return compatible_classes
| 39
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_groupvit'] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_groupvit'] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 557
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Return sensible lower/upper display bounds for an FFT magnitude plot."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the gain (dB) of the filter's impulse response against frequency."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase (radians) of the filter's impulse response against frequency."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
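# NOTE: The plotting helpers above accept any object exposing process(sample) -> float. The
# filter below is a made-up, minimal single-pole low-pass (leaky integrator) added purely to
# illustrate that API; it is not part of the original module.
class SinglePoleLowPass:
    """First-order IIR low-pass: y[n] = alpha * x[n] + (1 - alpha) * y[n - 1]."""

    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev_output = 0.0

    def process(self, sample: float) -> float:
        self.prev_output = self.alpha * sample + (1 - self.alpha) * self.prev_output
        return self.prev_output


if __name__ == "__main__":
    # Render the gain and phase curves of the example filter at a 48 kHz sample rate.
    show_frequency_response(SinglePoleLowPass(alpha=0.2), 48000)
    show_phase_response(SinglePoleLowPass(alpha=0.2), 48000)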
| 557
| 1
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 345
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name: str) -> SwinvaConfig:
__snake_case = SwinvaConfig()
__snake_case = swinva_name.split('_' )
__snake_case = name_split[1]
if "to" in name_split[3]:
__snake_case = int(name_split[3][-3:] )
else:
__snake_case = int(name_split[3] )
if "to" in name_split[2]:
__snake_case = int(name_split[2][-2:] )
else:
__snake_case = int(name_split[2][6:] )
if model_size == "tiny":
__snake_case = 96
__snake_case = (2, 2, 6, 2)
__snake_case = (3, 6, 12, 24)
elif model_size == "small":
__snake_case = 96
__snake_case = (2, 2, 18, 2)
__snake_case = (3, 6, 12, 24)
elif model_size == "base":
__snake_case = 1_28
__snake_case = (2, 2, 18, 2)
__snake_case = (4, 8, 16, 32)
else:
__snake_case = 1_92
__snake_case = (2, 2, 18, 2)
__snake_case = (6, 12, 24, 48)
if "to" in swinva_name:
__snake_case = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__snake_case = 2_18_41
__snake_case = 'huggingface/label-files'
__snake_case = 'imagenet-22k-id2label.json'
__snake_case = json.load(open(hf_hub_download(__snake_case ,__snake_case ,repo_type='dataset' ) ,'r' ) )
__snake_case = {int(__snake_case ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
else:
__snake_case = 10_00
__snake_case = 'huggingface/label-files'
__snake_case = 'imagenet-1k-id2label.json'
__snake_case = json.load(open(hf_hub_download(__snake_case ,__snake_case ,repo_type='dataset' ) ,'r' ) )
__snake_case = {int(__snake_case ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
__snake_case = img_size
__snake_case = num_classes
__snake_case = embed_dim
__snake_case = depths
__snake_case = num_heads
__snake_case = window_size
return config
def rename_key(name: str) -> str:
if "patch_embed.proj" in name:
__snake_case = name.replace('patch_embed.proj' ,'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case = name.replace('patch_embed.norm' ,'embeddings.norm' )
if "layers" in name:
__snake_case = 'encoder.' + name
if "attn.proj" in name:
__snake_case = name.replace('attn.proj' ,'attention.output.dense' )
if "attn" in name:
__snake_case = name.replace('attn' ,'attention.self' )
if "norm1" in name:
__snake_case = name.replace('norm1' ,'layernorm_before' )
if "norm2" in name:
__snake_case = name.replace('norm2' ,'layernorm_after' )
if "mlp.fc1" in name:
__snake_case = name.replace('mlp.fc1' ,'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case = name.replace('mlp.fc2' ,'output.dense' )
if "q_bias" in name:
__snake_case = name.replace('q_bias' ,'query.bias' )
if "k_bias" in name:
__snake_case = name.replace('k_bias' ,'key.bias' )
if "v_bias" in name:
__snake_case = name.replace('v_bias' ,'value.bias' )
if "cpb_mlp" in name:
__snake_case = name.replace('cpb_mlp' ,'continuous_position_bias_mlp' )
if name == "norm.weight":
__snake_case = 'layernorm.weight'
if name == "norm.bias":
__snake_case = 'layernorm.bias'
if "head" in name:
__snake_case = name.replace('head' ,'classifier' )
else:
__snake_case = 'swinv2.' + name
return name
def convert_state_dict(orig_state_dict: dict, model: SwinvaForImageClassification) -> dict:
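    # timm stores each attention projection as one fused "qkv" tensor; the loop below
    # splits it into the separate query/key/value weights and biases the HF model expects.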
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(__snake_case )
if "mask" in key:
continue
elif "qkv" in key:
__snake_case = key.split('.' )
__snake_case = int(key_split[1] )
__snake_case = int(key_split[3] )
__snake_case = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[
dim : dim * 2
]
__snake_case = val[-dim:]
else:
__snake_case = val
return orig_state_dict
def convert_swinva_checkpoint(swinva_name: str, pytorch_dump_folder_path: str) -> None:
__snake_case = timm.create_model(__snake_case ,pretrained=__snake_case )
timm_model.eval()
__snake_case = get_swinva_config(__snake_case )
__snake_case = SwinvaForImageClassification(__snake_case )
model.eval()
__snake_case = convert_state_dict(timm_model.state_dict() ,__snake_case )
model.load_state_dict(__snake_case )
__snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__snake_case = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' ,'-' ) ) )
__snake_case = Image.open(requests.get(__snake_case ,stream=__snake_case ).raw )
__snake_case = image_processor(images=__snake_case ,return_tensors='pt' )
__snake_case = timm_model(inputs['pixel_values'] )
__snake_case = model(**__snake_case ).logits
assert torch.allclose(__snake_case ,__snake_case ,atol=1e-3 )
print(f'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
model.push_to_hub(
repo_path_or_name=Path(__snake_case ,__snake_case ) ,organization='nandwalritik' ,commit_message='Add model' ,)
if __name__ == "__main__":
lowerCamelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swinv2_name",
default="swinv2_tiny_patch4_window8_256",
type=str,
help="Name of the Swinv2 timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCamelCase_ : Tuple = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 345
| 1
|
def kth_permutation(k: int, n: int) -> list[int]:
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation: treat k as a number in the factorial number system,
    # where each "digit" selects the next element from the remaining ones.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60
|
def remove_duplicates(key: str) -> str:
    """Remove duplicate alphabetic characters from a keyword, keeping first occurrences."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Return a substitution map built from the keyword."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher a message with the given cipher map."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher a message with the given cipher map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Handle I/O for enciphering or deciphering with a keyword cipher."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 221
| 0
|
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text` with pseudo-random keys; returns (cipher, key)."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Recover the original text from (cipher, key)."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
| 719
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
def __init__( self : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : Any=7 , lowercase__ : Any=3 , lowercase__ : str=3_0 , lowercase__ : int=4_0_0 , lowercase__ : Dict=True , lowercase__ : Union[str, Any]=None , lowercase__ : str=True , lowercase__ : Dict=1 / 2_5_5 , lowercase__ : List[Any]=True , lowercase__ : Dict=[0.5, 0.5, 0.5] , lowercase__ : List[str]=[0.5, 0.5, 0.5] , lowercase__ : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a_ = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = do_rescale
a_ = rescale_factor
a_ = do_normalize
a_ = image_mean
a_ = image_std
a_ = do_pad
def __magic_name__ ( self : int ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __magic_name__ ( self : Tuple , lowercase__ : Optional[int] , lowercase__ : Optional[int]=False ):
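        # Computes the (height, width) the image processor is expected to produce after
        # resizing with the configured "shortest_edge" / "longest_edge" settings.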
if not batched:
a_ = image_inputs[0]
if isinstance(lowercase__ , Image.Image ):
a_ , a_ = image.size
else:
a_ , a_ = image.shape[1], image.shape[2]
if w < h:
a_ = int(self.size['''shortest_edge'''] * h / w )
a_ = self.size['''shortest_edge''']
elif w > h:
a_ = self.size['''shortest_edge''']
a_ = int(self.size['''shortest_edge'''] * w / h )
else:
a_ = self.size['''shortest_edge''']
a_ = self.size['''shortest_edge''']
else:
a_ = []
for image in image_inputs:
a_ , a_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a_ = max(lowercase__ , key=lambda lowercase__ : item[0] )[0]
a_ = max(lowercase__ , key=lambda lowercase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self : Optional[int] ):
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowercase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowercase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowercase__ , '''do_rescale''' ) )
self.assertTrue(hasattr(lowercase__ , '''rescale_factor''' ) )
self.assertTrue(hasattr(lowercase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowercase__ , '''size''' ) )
self.assertTrue(hasattr(lowercase__ , '''do_pad''' ) )
def __magic_name__ ( self : Any ):
a_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , lowercase__ )
a_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=lowercase__ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} )
self.assertEqual(image_processor.do_pad , lowercase__ )
def __magic_name__ ( self : str ):
pass
def __magic_name__ ( self : List[Any] ):
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(lowercase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ , a_ = self.image_processor_tester.get_expected_values(lowercase__ , batched=lowercase__ )
a_ = image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self : Optional[int] ):
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , numpify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(lowercase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ = image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(lowercase__ , batched=lowercase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self : List[str] ):
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(lowercase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ = image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(lowercase__ , batched=lowercase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __magic_name__ ( self : Dict ):
# prepare image and target
a_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
a_ = json.loads(f.read() )
a_ = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
a_ = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
a_ = image_processing(images=lowercase__ , annotations=lowercase__ , return_tensors='''pt''' )
# verify pixel values
a_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , lowercase__ )
a_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowercase__ , atol=1e-4 ) )
# verify area
a_ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowercase__ ) )
# verify boxes
a_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowercase__ )
a_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowercase__ , atol=1e-3 ) )
# verify image_id
a_ = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowercase__ ) )
# verify is_crowd
a_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowercase__ ) )
# verify class_labels
a_ = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowercase__ ) )
# verify orig_size
a_ = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowercase__ ) )
# verify size
a_ = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowercase__ ) )
@slow
def __magic_name__ ( self : str ):
# prepare image, target and masks_path
a_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
a_ = json.loads(f.read() )
a_ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
a_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
a_ = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
a_ = image_processing(images=lowercase__ , annotations=lowercase__ , masks_path=lowercase__ , return_tensors='''pt''' )
# verify pixel values
a_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , lowercase__ )
a_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowercase__ , atol=1e-4 ) )
# verify area
a_ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowercase__ ) )
# verify boxes
a_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowercase__ )
a_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowercase__ , atol=1e-3 ) )
# verify image_id
a_ = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowercase__ ) )
# verify is_crowd
a_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowercase__ ) )
# verify class_labels
a_ = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowercase__ ) )
# verify masks
a_ = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , lowercase__ )
# verify orig_size
a_ = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowercase__ ) )
# verify size
a_ = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowercase__ ) )
| 143
| 0
|
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
lowerCAmelCase = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Dict=None , snake_case_ : Optional[int]=None ) ->int:
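    # Resolves the dotted `tensor_name` inside `module`, then moves/creates the parameter or
    # buffer on the target device, wrapping it as a bitsandbytes 4-bit/8-bit parameter when needed.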
if "." in tensor_name:
lowerCamelCase__ : Union[str, Any] =tensor_name.split('.' )
for split in splits[:-1]:
lowerCamelCase__ : Any =getattr(__snake_case , __snake_case )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
lowerCamelCase__ : int =new_module
lowerCamelCase__ : Union[str, Any] =splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
lowerCamelCase__ : Any =tensor_name in module._buffers
lowerCamelCase__ : Optional[int] =getattr(__snake_case , __snake_case )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, 'Params4bit') and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_4bit or is_8bit:
        param = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowerCamelCase__ : int =old_value.to(__snake_case )
elif isinstance(__snake_case , torch.Tensor ):
lowerCamelCase__ : Optional[int] =value.to('cpu' )
                if value.dtype == torch.int8:
lowerCamelCase__ : List[Any] =version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
lowerCamelCase__ : Optional[Any] =torch.tensor(__snake_case , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , __snake_case ) and fpaa_statistics is None:
lowerCamelCase__ : Tuple =new_value.T
lowerCamelCase__ : int =old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)
lowerCamelCase__ : Tuple =new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(__snake_case ) )
else:
if value is None:
lowerCamelCase__ : int =old_value.to(__snake_case )
elif isinstance(__snake_case , torch.Tensor ):
lowerCamelCase__ : Optional[int] =value.to(__snake_case )
else:
lowerCamelCase__ : Union[str, Any] =torch.tensor(__snake_case , device=__snake_case )
if is_buffer:
lowerCamelCase__ : Optional[int] =new_value
else:
lowerCamelCase__ : Union[str, Any] =nn.Parameter(__snake_case , requires_grad=old_value.requires_grad )
lowerCamelCase__ : Dict =new_value
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Optional[Any]=None , snake_case_ : Any=None , snake_case_ : Dict=None , snake_case_ : Tuple=False ) ->Dict:
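    # Recursively replaces eligible nn.Linear / Conv1D sub-modules with bitsandbytes
    # Linear8bitLt (int8) or Linear4bit layers, skipping anything in `modules_to_not_convert`.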
for name, module in model.named_children():
if current_key_name is None:
lowerCamelCase__ : List[Any] =[]
current_key_name.append(__snake_case )
if (isinstance(__snake_case , nn.Linear ) or isinstance(__snake_case , __snake_case )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(__snake_case ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__snake_case , __snake_case ):
lowerCamelCase__ : List[str] =module.weight.shape
else:
lowerCamelCase__ : Optional[Any] =module.in_features
lowerCamelCase__ : str =module.out_features
if quantization_config.quantization_method() == "llm_int8":
                        lowerCamelCase__ : Tuple =bnb.nn.Linear8bitLt(
                            __snake_case , __snake_case , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
lowerCamelCase__ : Optional[Any] =True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
                            lowerCamelCase__ : Optional[Any] =bnb.nn.Linear4bit(
                                __snake_case , __snake_case , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
lowerCamelCase__ : Union[str, Any] =True
# Store the module class in case we need to transpose the weight later
lowerCamelCase__ : str =type(__snake_case )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__snake_case )
if len(list(module.children() ) ) > 0:
lowerCamelCase__ : Dict =_replace_with_bnb_linear(
__snake_case , __snake_case , __snake_case , __snake_case , has_been_replaced=__snake_case , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Optional[Any]=None , snake_case_ : List[str]=None , snake_case_ : Optional[Any]=None ) ->Union[str, Any]:
lowerCamelCase__ : Union[str, Any] =["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
lowerCamelCase__ : Dict =_replace_with_bnb_linear(
__snake_case , __snake_case , __snake_case , __snake_case )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def lowerCAmelCase_ ( *snake_case_ : Union[str, Any] , **snake_case_ : List[str] ) ->List[str]:
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , __snake_case , )
return replace_with_bnb_linear(*__snake_case , **__snake_case )
def lowerCAmelCase_ ( *snake_case_ : Union[str, Any] , **snake_case_ : Optional[Any] ) ->Dict:
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , __snake_case , )
return set_module_quantized_tensor_to_device(*__snake_case , **__snake_case )
def lowerCAmelCase_ ( snake_case_ : Dict ) ->Optional[Any]:
lowerCamelCase__ : Optional[Any] =deepcopy(__snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
lowerCamelCase__ : Optional[int] =find_tied_parameters(__snake_case )
# For compatibility with Accelerate < 0.18
if isinstance(__snake_case , __snake_case ):
lowerCamelCase__ : Dict =sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCamelCase__ : Optional[Any] =sum(__snake_case , [] )
lowerCamelCase__ : Any =len(__snake_case ) > 0
# Check if it is a base model
lowerCamelCase__ : Union[str, Any] =not hasattr(__snake_case , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCamelCase__ : int =list(model.named_children() )
lowerCamelCase__ : Dict =[list_modules[-1][0]]
# add last module together with tied weights
lowerCamelCase__ : Dict =set(__snake_case ) - set(__snake_case )
lowerCamelCase__ : Dict =list(set(__snake_case ) ) + list(__snake_case )
# remove ".weight" from the keys
lowerCamelCase__ : List[Any] =[""".weight""", """.bias"""]
lowerCamelCase__ : Tuple =[]
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCamelCase__ : Dict =name.replace(__snake_case , '' )
filtered_module_names.append(__snake_case )
return filtered_module_names
| 174
|
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Returns the count of all n-digit positive integers
    which are also an nth power.
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 215
| 0
|
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
'''simple docstring'''
__UpperCAmelCase = ComputeEnvironment.AMAZON_SAGEMAKER
__UpperCAmelCase = True
__UpperCAmelCase = "ml.p3.2xlarge"
__UpperCAmelCase = "accelerate_sagemaker_execution_role"
__UpperCAmelCase = "hf-sm"
__UpperCAmelCase = "us-east-1"
__UpperCAmelCase = 1
__UpperCAmelCase = "accelerate-sagemaker-1"
__UpperCAmelCase = "1.6"
__UpperCAmelCase = "4.4"
__UpperCAmelCase = "train.py"
    success_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
    fail_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class PassedArgumentsTest(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args['model_name_or_path'], str)
        assert isinstance(converted_args['do_train'], bool)
        assert isinstance(converted_args['epochs'], int)
        assert isinstance(converted_args['learning_rate'], float)
        assert isinstance(converted_args['max_steps'], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 716
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price of an item including sales tax."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f'''{price_plus_tax(100, 0.25) = }''')
    print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 527
| 0
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = '''▁'''
lowerCamelCase__ = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
lowerCamelCase__ = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
lowerCamelCase__ = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
lowerCamelCase__ = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
lowerCamelCase__ = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
def __init__( self : Optional[int] , __lowercase : int , __lowercase : Optional[int]=None , __lowercase : Tuple=False , __lowercase : List[Any]="utf8" , __lowercase : Tuple="[UNK]" , __lowercase : Tuple="[SEP]" , __lowercase : Any="[PAD]" , __lowercase : Optional[Any]="[CLS]" , __lowercase : Union[str, Any]="[MASK]" , __lowercase : Optional[Dict[str, Any]] = None , **__lowercase : Any , ):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , vocab_file=lowerCAmelCase__ , encoding=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
__a = do_lower_case
__a = sentencepiece_model_ckpt
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
__a = self.load_vocab(filepath=lowerCAmelCase__ )
else:
__a = {self.sp_model.id_to_piece(lowerCAmelCase__ ): id for id in range(self.sp_model.get_piece_size() )}
__a = {v: k for k, v in self.vocab.items()}
def UpperCamelCase_ ( self : str , __lowercase : Any ):
'''simple docstring'''
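        # Builds a mapping from each sentencepiece token to its (start, end) character
        # span in the NFKC-normalized input text.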
if text is None:
return None
__a = self.tokenize(lowerCAmelCase__ )
__a , __a = """""", []
for i, ch in enumerate(lowerCAmelCase__ ):
if ch in self.SP_CHAR_MAPPING:
__a = self.SP_CHAR_MAPPING.get(lowerCAmelCase__ )
else:
__a = unicodedata.normalize("""NFKC""" , lowerCAmelCase__ )
if self.is_whitespace(lowerCAmelCase__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(lowerCAmelCase__ ) )
__a , __a , __a = normalized_text, [], 0
if self.do_lower_case:
__a = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__a = token[1:]
__a = text[offset:].index(lowerCAmelCase__ ) + offset
__a = start + len(lowerCAmelCase__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__a = end
return token_mapping
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return len(self.vocab )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Tuple ):
'''simple docstring'''
__a = self.__dict__.copy()
__a = None
return state
def __setstate__( self : Optional[int] , __lowercase : Union[str, Any] ):
'''simple docstring'''
__a = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase_ ( self : List[str] , __lowercase : Union[str, Any] ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(lowerCAmelCase__ , lowerCAmelCase__ ) for c in text) )
def UpperCamelCase_ ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Optional[Any]=False , __lowercase : List[str]=64 , __lowercase : List[Any]=0.1 ):
'''simple docstring'''
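        # Tokenizes with sentencepiece (optionally using subword-regularization sampling),
        # then re-splits pieces at Chinese characters, punctuation and digit boundaries.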
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
__a = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
__a = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
__a = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
__a = self.sp_model.EncodeAsPieces(lowerCAmelCase__ )
else:
__a = self.sp_model.SampleEncodeAsPieces(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__a = []
for pi, piece in enumerate(lowerCAmelCase__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(lowerCAmelCase__ ) and pi != 0:
new_pieces.append(lowerCAmelCase__ )
continue
else:
continue
__a = 0
for i, chunk in enumerate(lowerCAmelCase__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(lowerCAmelCase__ ) or self.is_punct(lowerCAmelCase__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(lowerCAmelCase__ )
__a = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__a = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__a = i
if len(lowerCAmelCase__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Optional[int] ):
'''simple docstring'''
__a = """""".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , """ """ ).strip()
return out_string
def UpperCamelCase_ ( self : Tuple , __lowercase : List[Any] ):
'''simple docstring'''
__a = self.convert_ids_to_tokens(lowerCAmelCase__ )
__a = """""".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , """ """ ).strip()
return out_string
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Dict ):
'''simple docstring'''
return self.vocab.get(lowerCAmelCase__ , self.vocab.get(self.unk_token ) )
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : List[str] ):
'''simple docstring'''
return self.reverse_vocab.get(lowerCAmelCase__ , self.unk_token )
def UpperCamelCase_ ( self : List[str] , __lowercase : Dict , __lowercase : Optional[Any]=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCamelCase_ ( self : Dict , __lowercase : Dict , __lowercase : List[str]=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase_ ( self : Any , __lowercase : Tuple , __lowercase : List[str]=None , __lowercase : Any=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
'''simple docstring'''
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(lowerCAmelCase__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(lowerCAmelCase__ ) + 1) + [1] * (len(lowerCAmelCase__ ) + 3)
def UpperCamelCase_ ( self : List[str] , __lowercase : List[str] ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase_ ( self : Optional[int] , __lowercase : Dict ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase_ ( self : Dict , __lowercase : Optional[Any] ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase_ ( self : List[str] , __lowercase : Optional[Any] ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(lowerCAmelCase__ ) == 1:
__a = unicodedata.category(lowerCAmelCase__ )
if cat == "Zs":
return True
return False
def UpperCamelCase_ ( self : Any , __lowercase : Dict ):
'''simple docstring'''
__a = {}
with io.open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(lowerCAmelCase__ ):
__a = line.rstrip("""\n""" )
__a = int(lowerCAmelCase__ )
return token_to_idx
def UpperCamelCase_ ( self : List[Any] , __lowercase : str , __lowercase : Optional[str] = None ):
'''simple docstring'''
__a = 0
if os.path.isdir(lowerCAmelCase__ ):
__a = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
__a = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda __lowercase : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
""" Please check that the vocabulary is not corrupted!""" )
__a = token_index
writer.write(token + """\n""" )
index += 1
__a = os.path.join(lowerCAmelCase__ , """sentencepiece.bpe.model""" )
with open(lowerCAmelCase__ , """wb""" ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (vocab_file,)
| 225
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase : int = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
'''simple docstring'''
def __init__( self : Tuple , *lowerCAmelCase__ : str , **lowerCAmelCase__ : List[Any] ) -> None:
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 214
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class ImageGPTConfig(PretrainedConfig):
__SCREAMING_SNAKE_CASE : Tuple = """imagegpt"""
__SCREAMING_SNAKE_CASE : List[Any] = ["""past_key_values"""]
__SCREAMING_SNAKE_CASE : Any = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : str=5_1_2 + 1 , SCREAMING_SNAKE_CASE__ : Optional[int]=3_2 * 3_2 , SCREAMING_SNAKE_CASE__ : str=5_1_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=2_4 , SCREAMING_SNAKE_CASE__ : Optional[int]=8 , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]="quick_gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : int=False , **SCREAMING_SNAKE_CASE__ : str , ):
'''simple docstring'''
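        # Note: vocab_size defaults to 512 colour clusters plus one start-of-sequence token,
        # and n_positions covers a 32x32 pixel grid.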
__a : Union[str, Any] = vocab_size
__a : str = n_positions
__a : Dict = n_embd
__a : Tuple = n_layer
__a : List[str] = n_head
__a : List[Any] = n_inner
__a : Union[str, Any] = activation_function
__a : List[str] = resid_pdrop
__a : Optional[Any] = embd_pdrop
__a : Any = attn_pdrop
__a : Tuple = layer_norm_epsilon
__a : Optional[Any] = initializer_range
__a : Union[str, Any] = scale_attn_weights
__a : Dict = use_cache
__a : Dict = scale_attn_by_inverse_layer_idx
__a : Union[str, Any] = reorder_and_upcast_attn
__a : List[Any] = tie_word_embeddings
super().__init__(tie_word_embeddings=UpperCamelCase__ , **UpperCamelCase__ )
class ImageGPTOnnxConfig(OnnxConfig):
@property
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
] )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple = 1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : Any = False , SCREAMING_SNAKE_CASE__ : int = None , SCREAMING_SNAKE_CASE__ : Any = 3 , SCREAMING_SNAKE_CASE__ : List[str] = 3_2 , SCREAMING_SNAKE_CASE__ : List[str] = 3_2 , ):
'''simple docstring'''
__a : Dict = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__a : List[Any] = dict(preprocessor(images=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return inputs
| 709
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        '''Calculate y[n] for a single input sample.'''
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    '''Get y-axis bounds for plotting fft results.'''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    '''Show the frequency response of a filter by feeding it an impulse.'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    '''Show the phase response of a filter by feeding it an impulse.'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
| 577
| 0
|
def solution(n: int = 1000) -> int:
    """
    Returns the sum of all the multiples of 3 or 5 below n.
    """
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            result -= a
        a += 1
    return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 1
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
"""simple docstring"""
    with open(os.path.join(PATH_TO_DIFFUSERS , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
__UpperCamelCase = f.readlines()
# Get to the point we do the actual imports for type checking
__UpperCamelCase = 0
__UpperCamelCase = {}
# Go through the end of the file
while line_index < len(_lowercase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
__UpperCamelCase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
__UpperCamelCase = []
# Until we unindent, add backend objects to the list
while line_index < len(_lowercase ) and len(lines[line_index] ) > 1:
__UpperCamelCase = lines[line_index]
__UpperCamelCase = _re_single_line_import.search(_lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(_lowercase ) > 0:
__UpperCamelCase = objects
else:
line_index += 1
return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
"""simple docstring"""
if backend_specific_objects is None:
__UpperCamelCase = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
__UpperCamelCase = {}
for backend, objects in backend_specific_objects.items():
__UpperCamelCase = '[' + ', '.join(f'''"{b}"''' for b in backend.split('_and_' ) ) + ']'
__UpperCamelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(_lowercase , _lowercase ) for o in objects] )
__UpperCamelCase = dummy_file
return dummy_files
def check_dummies(overwrite=False):
"""simple docstring"""
__UpperCamelCase = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
__UpperCamelCase = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
__UpperCamelCase = os.path.join(_lowercase , 'utils' )
__UpperCamelCase = {
backend: os.path.join(_lowercase , f'''dummy_{short_names.get(_lowercase , _lowercase )}_objects.py''' )
for backend in dummy_files.keys()
}
__UpperCamelCase = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(_lowercase ):
with open(_lowercase , 'r' , encoding='utf-8' , newline='\n' ) as f:
__UpperCamelCase = f.read()
else:
__UpperCamelCase = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f'''Updating diffusers.utils.dummy_{short_names.get(_lowercase , _lowercase )}_objects.py as the main '''
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
f'''diffusers.utils.dummy_{short_names.get(_lowercase , _lowercase )}_objects.py. Run `make fix-copies` '''
'to fix this.' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__snake_case = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1
| 1
|
import os
import sys
import unittest
_a : Optional[int] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_a : Any = os.path.join(git_repo_path, 'src', 'transformers')
_a : str = "\n{0} = None\n"
_a : Dict = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
_a : Optional[Any] = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self ):
__lowerCamelCase = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(A_ )
__lowerCamelCase = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(A_ , """tokenizers""" )
__lowerCamelCase = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(A_ , """tensorflow_text""" )
__lowerCamelCase = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(A_ , """sentencepiece_and_tokenizers""" )
__lowerCamelCase = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(A_ , """sentencepiece_and_tensorflow_text""" )
__lowerCamelCase = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(A_ , """sentencepiece_and_tokenizers_and_vision""" )
def lowerCamelCase_ ( self ):
__lowerCamelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , A_ )
self.assertIn("""tensorflow_text""" , A_ )
self.assertIn("""sentencepiece_and_tokenizers""" , A_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertModel""" , objects["""tf"""] )
self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
def lowerCamelCase_ ( self ):
__lowerCamelCase = create_dummy_object("""CONSTANT""" , """\'torch\'""" )
self.assertEqual(A_ , """\nCONSTANT = None\n""" )
__lowerCamelCase = create_dummy_object("""function""" , """\'torch\'""" )
self.assertEqual(
A_ , """\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n""" )
__lowerCamelCase = """
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
"""
__lowerCamelCase = create_dummy_object("""FakeClass""" , """\'torch\'""" )
self.assertEqual(A_ , A_ )
def lowerCamelCase_ ( self ):
__lowerCamelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
__lowerCamelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , A_ )
| 705
|
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
_a : List[str] = datasets.logging.get_logger(__name__)
_a : Optional[Any] = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_a : Dict = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_a : Dict = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
_a : List[Any] = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
    def lowerCamelCase_ ( self , dl_manager ):
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"""Using default BLEURT-Base checkpoint for sequence maximum length 128. """
"""You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
__lowerCamelCase = """bleurt-base-128"""
if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
else:
raise KeyError(
f'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
# download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
    def lowerCamelCase_ ( self , predictions , references ):
        scores = self.scorer.score(references=references , candidates=predictions )
return {"scores": scores}
| 571
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : Tuple = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A_['modeling_graphormer'] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], A_, module_spec=__spec__)
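# Hedged usage sketch only (import path assumed): with the lazy import structure above,
# the config can be loaded cheaply, while the torch-backed model class is only resolved
# on first access.
if __name__ == "__main__":
    from transformers import GraphormerConfig, GraphormerModel

    model = GraphormerModel(GraphormerConfig())
    print(type(model).__name__)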
| 57
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
A_ : int = logging.get_logger(__name__)
class _lowerCAmelCase( CLIPImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
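# Hedged migration sketch (checkpoint name assumed): new code should construct
# CLIPImageProcessor directly rather than going through this deprecated shim.
if __name__ == "__main__":
    from PIL import Image
    from transformers import CLIPImageProcessor

    image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
    inputs = image_processor(images=Image.new("RGB", (224, 224)), return_tensors="np")
    print(inputs["pixel_values"].shape)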
| 57
| 1
|
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = False, False, False
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = None
# Automatically constructed
SCREAMING_SNAKE_CASE = '''dict'''
SCREAMING_SNAKE_CASE = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
SCREAMING_SNAKE_CASE = field(default='''Audio''' , init=snake_case , repr=snake_case )
def __call__( self ):
return self.pa_type
    def _UpperCamelCase ( self ,value ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
        if isinstance(value ,str ):
return {"bytes": None, "path": value}
        elif isinstance(value ,bytes ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer ,value["""array"""] ,value["""sampling_rate"""] ,format="""wav""" )
            return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["""bytes"""] ,dtype=np.int16 ).astype(np.float32 ) / 32_767
                else:
                    bytes_value = np.memmap(value["""path"""] ,dtype="""h""" ,mode="""r""" ).astype(np.float32 ) / 32_767
                buffer = BytesIO(bytes() )
                sf.write(buffer ,bytes_value ,value["""sampling_rate"""] ,format="""wav""" )
                return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def _UpperCamelCase ( self ,A ,A = None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
UpperCAmelCase , UpperCAmelCase = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
UpperCAmelCase = xsplitext(A )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
UpperCAmelCase = token_per_repo_id or {}
UpperCAmelCase = path.split("""::""" )[-1]
try:
UpperCAmelCase = string_to_dict(A ,config.HUB_DATASETS_URL )["""repo_id"""]
UpperCAmelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase = None
with xopen(A ,"""rb""" ,use_auth_token=A ) as f:
UpperCAmelCase , UpperCAmelCase = sf.read(A )
else:
UpperCAmelCase , UpperCAmelCase = sf.read(A )
UpperCAmelCase = array.T
if self.mono:
UpperCAmelCase = librosa.to_mono(A )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase = librosa.resample(A ,orig_sr=A ,target_sr=self.sampling_rate )
UpperCAmelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _UpperCamelCase ( self ):
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _UpperCamelCase ( self ,A ):
if pa.types.is_string(storage.type ):
UpperCAmelCase = pa.array([None] * len(A ) ,type=pa.binary() )
UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] ,["""bytes""", """path"""] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase = pa.array([None] * len(A ) ,type=pa.string() )
UpperCAmelCase = pa.StructArray.from_arrays([storage, path_array] ,["""bytes""", """path"""] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
UpperCAmelCase = pa.array([Audio().encode_example(A ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
UpperCAmelCase = storage.field("""bytes""" )
else:
UpperCAmelCase = pa.array([None] * len(A ) ,type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
UpperCAmelCase = storage.field("""path""" )
else:
UpperCAmelCase = pa.array([None] * len(A ) ,type=pa.string() )
UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] ,["""bytes""", """path"""] ,mask=storage.is_null() )
return array_cast(A ,self.pa_type )
def _UpperCamelCase ( self ,A ):
@no_op_if_value_is_null
def path_to_bytes(A ):
with xopen(A ,"""rb""" ) as f:
UpperCAmelCase = f.read()
return bytes_
UpperCAmelCase = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
UpperCAmelCase = pa.array(
[os.path.basename(A ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] ,type=pa.string() ,)
UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] ,["""bytes""", """path"""] ,mask=bytes_array.is_null() )
return array_cast(A ,self.pa_type )
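# Hedged usage sketch (the file path is a placeholder): the usual entry point for this
# feature is Dataset.cast_column, which routes through the encode/cast logic above and
# decodes lazily on access.
if __name__ == "__main__":
    from datasets import Audio, Dataset

    ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
    ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
    # Accessing ds[0]["audio"] would decode to {"path", "array", "sampling_rate"}
    # once the placeholder path points at a real file.
    print(ds.features["audio"])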
| 708
|
"""simple docstring"""
from math import sqrt
def solution( limit = 100_0000 ):
"""simple docstring"""
    num_cuboids = 0
    max_cuboid_size = 0
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 74
| 0
|
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def lowercase ( SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
if subparsers is not None:
SCREAMING_SNAKE_CASE_ = subparsers.add_parser('tpu-config' , description=_description )
else:
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
SCREAMING_SNAKE_CASE_ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=__a , default=__a , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=__a , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=__a , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
SCREAMING_SNAKE_CASE_ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=__a , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=__a )
return parser
def lowercase ( SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(__a ):
SCREAMING_SNAKE_CASE_ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
SCREAMING_SNAKE_CASE_ = defaults.command_file
if not args.command and defaults.commands is not None:
SCREAMING_SNAKE_CASE_ = defaults.commands
if not args.tpu_name:
SCREAMING_SNAKE_CASE_ = defaults.tpu_name
if not args.tpu_zone:
SCREAMING_SNAKE_CASE_ = defaults.tpu_zone
if args.accelerate_version == "dev":
SCREAMING_SNAKE_CASE_ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
SCREAMING_SNAKE_CASE_ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , __a ):
SCREAMING_SNAKE_CASE_ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
SCREAMING_SNAKE_CASE_ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , __a ):
SCREAMING_SNAKE_CASE_ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
SCREAMING_SNAKE_CASE_ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
SCREAMING_SNAKE_CASE_ = '; '.join(__a )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
SCREAMING_SNAKE_CASE_ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(__a )}' )
return
subprocess.run(__a )
print('Successfully setup pod.' )
def lowercase ( ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = tpu_command_parser()
SCREAMING_SNAKE_CASE_ = parser.parse_args()
tpu_command_launcher(__a )
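# Hedged CLI sketch (TPU name and zone are placeholders): the flags mirror the argparse
# definitions above, and --debug only prints the assembled gcloud command.
#
#   accelerate tpu-config \
#       --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -r requirements.txt" --command "python train.py" \
#       --install_accelerate --debug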
| 205
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = '''Speech2TextFeatureExtractor'''
__UpperCAmelCase : Optional[Any] = '''Speech2TextTokenizer'''
def __init__( self : Union[str, Any] ,_a : Optional[int] ,_a : int ):
'''simple docstring'''
super().__init__(_a ,_a )
_a : List[str] = self.feature_extractor
_a : int = False
def __call__( self : Tuple ,*_a : str ,**_a : Tuple ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_a ,**_a )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
_a : Any = kwargs.pop('raw_speech' )
else:
_a : Dict = kwargs.pop('audio' ,_a )
_a : Tuple = kwargs.pop('sampling_rate' ,_a )
_a : List[str] = kwargs.pop('text' ,_a )
if len(_a ) > 0:
_a : Dict = args[0]
_a : Optional[Any] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
_a : str = self.feature_extractor(_a ,*_a ,sampling_rate=_a ,**_a )
if text is not None:
_a : Optional[Any] = self.tokenizer(_a ,**_a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_a : Tuple = encodings['input_ids']
return inputs
def __lowercase ( self : List[Any] ,*_a : Optional[int] ,**_a : List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def __lowercase ( self : Optional[Any] ,*_a : List[str] ,**_a : Dict ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@contextmanager
def __lowercase ( self : Any ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
_a : Dict = True
_a : List[str] = self.tokenizer
yield
_a : Tuple = self.feature_extractor
_a : List[str] = False
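# Hedged usage sketch (checkpoint name assumed): the current API passes `audio` and
# `text` in one call instead of the deprecated as_target_processor context manager.
if __name__ == "__main__":
    import numpy as np
    from transformers import Speech2TextProcessor

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    speech = np.zeros(16_000, dtype=np.float32)  # one second of silence as a stand-in
    inputs = processor(audio=speech, sampling_rate=16_000, text="hello world")
    print(sorted(inputs.keys()))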
| 229
| 0
|
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 528
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
UpperCamelCase__: Tuple = [8, 5, 9, 7]
UpperCamelCase__: int = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
UpperCamelCase__: List[str] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class SCREAMING_SNAKE_CASE:
"""simple docstring"""
def __init__( self : str , __snake_case : list[int] , __snake_case : list[list[int]] , __snake_case : list[list[int]] , ) -> None:
UpperCAmelCase : Union[str, Any] = claim_vector
UpperCAmelCase : Tuple = allocated_resources_table
UpperCAmelCase : List[str] = maximum_claim_table
def A ( self : Optional[Any] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def A ( self : Union[str, Any] ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def A ( self : str ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__snake_case ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def A ( self : List[Any] ) -> dict[int, list[int]]:
return {self.__need().index(__snake_case ): i for i in self.__need()}
def A ( self : Tuple , **__snake_case : Union[str, Any] ) -> None:
UpperCAmelCase : Tuple = self.__need()
UpperCAmelCase : Union[str, Any] = self.__allocated_resources_table
UpperCAmelCase : int = self.__available_resources()
UpperCAmelCase : Tuple = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('''_''' * 50 + '''\n''' )
while need_list:
UpperCAmelCase : Dict = False
for each_need in need_list:
UpperCAmelCase : Optional[int] = True
for index, need in enumerate(__snake_case ):
if need > available_resources[index]:
UpperCAmelCase : List[str] = False
break
if execution:
UpperCAmelCase : str = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
UpperCAmelCase : Union[str, Any] = original_need_index
print(F"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__snake_case )
# update available/freed resources stack
UpperCAmelCase : Optional[int] = np.array(__snake_case ) + np.array(
alloc_resources_table[process_number] )
print(
'''Updated available resource stack for processes: '''
+ ''' '''.join([str(__snake_case ) for x in available_resources] ) )
break
if safe:
print('''The process is in a safe state.\n''' )
else:
print('''System in unsafe state. Aborting...\n''' )
break
def A ( self : Dict ) -> Optional[Any]:
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
F"""P{self.__allocated_resources_table.index(__snake_case ) + 1}"""
+ ''' '''.join(F"""{it:>8}""" for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
F"""P{self.__maximum_claim_table.index(__snake_case ) + 1}"""
+ ''' '''.join(F"""{it:>8}""" for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
+ ''' '''.join(str(__snake_case ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
+ ''' '''.join(str(__snake_case ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 528
| 1
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'allenai/led-base-16384': 1_6_3_8_4,
}
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = LEDTokenizer
_lowerCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self , __A=None , __A=None , __A=None , __A="replace" , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=False , __A=True , **__A , ):
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowerCamelCase__ ) != add_prefix_space:
__a = getattr(lowerCamelCase__ , pre_tok_state.pop("""type""" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase__ )
__a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__a = """post_processor"""
__a = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
if tokenizer_component_instance:
__a = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__a = tuple(state["""sep"""] )
if "cls" in state:
__a = tuple(state["""cls"""] )
__a = False
if state.get("""add_prefix_space""" , lowerCamelCase__ ) != add_prefix_space:
__a = add_prefix_space
__a = True
if state.get("""trim_offsets""" , lowerCamelCase__ ) != trim_offsets:
__a = trim_offsets
__a = True
if changes_to_apply:
__a = getattr(lowerCamelCase__ , state.pop("""type""" ) )
__a = component_class(**lowerCamelCase__ )
setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def snake_case_ ( self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case_ ( self , __A ):
__a = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value
__a = value
def snake_case_ ( self , *__A , **__A ):
__a = kwargs.get("""is_split_into_words""" , lowerCamelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def snake_case_ ( self , *__A , **__A ):
__a = kwargs.get("""is_split_into_words""" , lowerCamelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def snake_case_ ( self , __A , __A = None ):
__a = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def snake_case_ ( self , __A , __A=None ):
__a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case_ ( self , __A , __A = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self , __A , __A = None , __A = PaddingStrategy.DO_NOT_PAD , __A = None , __A = None , ):
__a = super()._pad(
encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
# Load from model defaults
if return_attention_mask is None:
__a = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__a = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__a = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase__ )
if needs_to_be_padded:
__a = len(lowerCamelCase__ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__a = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__a = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
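# Hedged usage sketch (checkpoint name assumed): the `_pad` override above keeps a
# user-supplied `global_attention_mask` aligned with the padded `input_ids`, using -1
# for padded positions.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
    encodings = [tokenizer(text) for text in ["short text", "a noticeably longer piece of text"]]
    for enc in encodings:
        # global attention on the first token only
        enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
    batch = tokenizer.pad(encodings, padding=True)
    print(batch["global_attention_mask"])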
| 99
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__A = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = ['''pixel_values''']
def __init__( self , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = PILImageResampling.BILINEAR , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = 1 / 255 , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> None:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
__lowerCamelCase = size if size is not None else {'shortest_edge': 256}
__lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
__lowerCamelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__lowerCamelCase = get_size_dict(lowerCamelCase__ , param_name='crop_size' )
__lowerCamelCase = do_resize
__lowerCamelCase = size
__lowerCamelCase = resample
__lowerCamelCase = do_center_crop
__lowerCamelCase = crop_size
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_normalize
__lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
__lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__lowerCamelCase = get_resize_output_image_size(lowerCamelCase__ , size=size['shortest_edge'] , default_to_square=lowerCamelCase__ )
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
__lowerCamelCase = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowerCamelCase__ , size=(size['height'], size['width']) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ ) -> np.ndarray:
'''simple docstring'''
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = ChannelDimension.FIRST , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
__lowerCamelCase = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase = size if size is not None else self.size
__lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
__lowerCamelCase = resample if resample is not None else self.resample
__lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCamelCase = crop_size if crop_size is not None else self.crop_size
__lowerCamelCase = get_size_dict(lowerCamelCase__ , param_name='crop_size' )
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase = image_std if image_std is not None else self.image_std
__lowerCamelCase = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
__lowerCamelCase = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_center_crop:
__lowerCamelCase = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images]
if do_rescale:
__lowerCamelCase = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
__lowerCamelCase = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
__lowerCamelCase = {'pixel_values': images}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(lowerCamelCase__ ):
__lowerCamelCase = target_sizes.numpy()
__lowerCamelCase = []
for idx in range(len(lowerCamelCase__ ) ):
__lowerCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowerCamelCase__ )
__lowerCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
__lowerCamelCase = logits.argmax(dim=1 )
__lowerCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 469
| 0
|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
random.seed(snake_case_ )
np.random.seed(snake_case_ )
torch.manual_seed(snake_case_ )
torch.cuda.manual_seed_all(snake_case_ )
# ^^ safe to call this function even if cuda is not available
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Dict , __A : Iterable[torch.nn.Parameter] , __A : float = 0.9_9_9_9 , __A : float = 0.0 , __A : int = 0 , __A : bool = False , __A : Union[float, int] = 1.0 , __A : Union[float, int] = 2 / 3 , __A : Optional[Any] = None , __A : Dict[str, Any] = None , **__A : List[str] , ):
if isinstance(__A , torch.nn.Module ):
snake_case__ : Tuple = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , __A , standard_warn=__A , )
snake_case__ : Union[str, Any] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
snake_case__ : List[str] = True
if kwargs.get("max_value" , __A ) is not None:
snake_case__ : Any = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , __A , standard_warn=__A )
snake_case__ : str = kwargs["max_value"]
if kwargs.get("min_value" , __A ) is not None:
snake_case__ : Dict = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , __A , standard_warn=__A )
snake_case__ : Any = kwargs["min_value"]
snake_case__ : Tuple = list(__A )
snake_case__ : str = [p.clone().detach() for p in parameters]
if kwargs.get("device" , __A ) is not None:
snake_case__ : Dict = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , __A , standard_warn=__A )
self.to(device=kwargs["device"] )
snake_case__ : List[str] = None
snake_case__ : Tuple = decay
snake_case__ : Dict = min_decay
snake_case__ : int = update_after_step
snake_case__ : Any = use_ema_warmup
snake_case__ : Union[str, Any] = inv_gamma
snake_case__ : Optional[int] = power
snake_case__ : List[Any] = 0
snake_case__ : Optional[Any] = None # set in `step()`
snake_case__ : List[Any] = model_cls
snake_case__ : int = model_config
@classmethod
def _lowercase ( cls : Dict , __A : Optional[int] , __A : int ):
snake_case__, snake_case__ : Any = model_cls.load_config(__A , return_unused_kwargs=__A )
snake_case__ : Tuple = model_cls.from_pretrained(__A )
snake_case__ : List[Any] = cls(model.parameters() , model_cls=__A , model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def _lowercase ( self : int , __A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
snake_case__ : Any = self.model_cls.from_config(self.model_config )
snake_case__ : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , __A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def _lowercase ( self : Tuple , __A : int ):
snake_case__ : str = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
snake_case__ : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
snake_case__ : Union[str, Any] = (1 + step) / (1_0 + step)
snake_case__ : List[Any] = min(__A , self.decay )
# make sure decay is not smaller than min_decay
snake_case__ : Union[str, Any] = max(__A , self.min_decay )
return cur_decay_value
@torch.no_grad()
def _lowercase ( self : Any , __A : Iterable[torch.nn.Parameter] ):
if isinstance(__A , torch.nn.Module ):
snake_case__ : int = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , __A , standard_warn=__A , )
snake_case__ : str = parameters.parameters()
snake_case__ : List[Any] = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
snake_case__ : str = self.get_decay(self.optimization_step )
snake_case__ : Optional[Any] = decay
snake_case__ : str = 1 - decay
snake_case__ : Tuple = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , __A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
snake_case__ : Any = deepspeed.zero.GatheredParameters(__A , modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def _lowercase ( self : str , __A : Iterable[torch.nn.Parameter] ):
snake_case__ : Union[str, Any] = list(__A )
for s_param, param in zip(self.shadow_params , __A ):
param.data.copy_(s_param.to(param.device ).data )
def _lowercase ( self : Optional[int] , __A : Tuple=None , __A : Tuple=None ):
snake_case__ : Union[str, Any] = [
p.to(device=__A , dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def _lowercase ( self : Dict ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def _lowercase ( self : Tuple , __A : Iterable[torch.nn.Parameter] ):
snake_case__ : Any = [param.detach().cpu().clone() for param in parameters]
def _lowercase ( self : str , __A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , __A ):
param.data.copy_(c_param.data )
# Better memory-wise.
snake_case__ : Dict = None
def _lowercase ( self : List[str] , __A : dict ):
snake_case__ : Tuple = copy.deepcopy(__A )
snake_case__ : List[Any] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
snake_case__ : Optional[Any] = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , __A ):
raise ValueError("Invalid min_decay" )
snake_case__ : int = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , __A ):
raise ValueError("Invalid optimization_step" )
snake_case__ : List[str] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , __A ):
raise ValueError("Invalid update_after_step" )
snake_case__ : Dict = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , __A ):
raise ValueError("Invalid use_ema_warmup" )
snake_case__ : Any = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
snake_case__ : Union[str, Any] = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
snake_case__ : List[str] = state_dict.get("shadow_params" , __A )
if shadow_params is not None:
snake_case__ : Dict = shadow_params
if not isinstance(self.shadow_params , __A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
| 25
|
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , ):
if attention_mask is None:
snake_case__ : Any = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case__ : List[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case__ : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ )
if decoder_head_mask is None:
snake_case__ : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
if cross_attn_head_mask is None:
snake_case__ : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[str] , __A : Any , __A : List[str]=1_3 , __A : List[Any]=7 , __A : Union[str, Any]=True , __A : Union[str, Any]=False , __A : str=9_9 , __A : Optional[Any]=1_6 , __A : Optional[Any]=2 , __A : Any=4 , __A : List[Any]=4 , __A : int="relu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[int]=0.0 , __A : Optional[Any]=0.0 , __A : List[Any]=2_0 , __A : Optional[Any]=2 , __A : int=1 , __A : Union[str, Any]=0 , ):
snake_case__ : Optional[Any] = parent
snake_case__ : List[str] = batch_size
snake_case__ : Union[str, Any] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : List[str] = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : int = encoder_layerdrop
snake_case__ : Tuple = decoder_layerdrop
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Tuple = eos_token_id
snake_case__ : Dict = pad_token_id
snake_case__ : str = bos_token_id
def _lowercase ( self : Tuple ):
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = self.eos_token_id # Eos Token
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case__ : int = input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Union[str, Any] = self.get_config()
snake_case__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(__A , __A , __A )
return config, inputs_dict
def _lowercase ( self : Dict ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _lowercase ( self : List[str] ):
snake_case__, snake_case__ : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowercase ( self : Optional[Any] , __A : int , __A : Dict ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
snake_case__ : List[Any] = inputs_dict["input_ids"]
snake_case__ : Optional[Any] = inputs_dict["attention_mask"]
snake_case__ : Union[str, Any] = inputs_dict["head_mask"]
# first forward pass
snake_case__ : Dict = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
snake_case__, snake_case__ : Dict = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : List[str] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case__ : Tuple = model(__A , attention_mask=__A )["last_hidden_state"]
snake_case__ : Tuple = model(__A , attention_mask=__A , past_key_values=__A )[
"last_hidden_state"
]
# select random slice
snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) )
def _lowercase ( self : str , __A : Dict , __A : Optional[Any] ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).to(__A ).eval()
snake_case__ : Union[str, Any] = model(**__A )
snake_case__ : Tuple = outputs.encoder_last_hidden_state
snake_case__ : Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_encoder()
encoder.save_pretrained(__A )
snake_case__ : Any = MaMaaaEncoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_decoder()
decoder.save_pretrained(__A )
snake_case__ : Optional[Any] = MaMaaaDecoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
a_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
a_ = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
a_ = True
a_ = True
a_ = False
a_ = False
def _lowercase ( self : int , __A : Tuple , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _lowercase ( self : Tuple ):
snake_case__ : Any = MaMaaaModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=__A )
def _lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
snake_case__, snake_case__ : Optional[int] = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info["missing_keys"] , [] )
def _lowercase ( self : Dict ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def _lowercase ( self : Any ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
snake_case__ : str = model_class(__A )
model.to(__A )
model.eval()
snake_case__ : str = copy.deepcopy(self._prepare_for_class(__A , __A ) )
if not self.is_encoder_decoder:
snake_case__ : Optional[Any] = inputs["input_ids"]
del inputs["input_ids"]
else:
snake_case__ : Union[str, Any] = inputs["input_ids"]
snake_case__ : List[str] = inputs.get("decoder_input_ids" , __A )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , __A )
snake_case__ : Tuple = model.get_input_embeddings()
if not self.is_encoder_decoder:
snake_case__ : List[Any] = wte(__A )
else:
snake_case__ : Any = wte(__A )
snake_case__ : Optional[int] = wte(__A )
with torch.no_grad():
model(**__A )[0]
def _lowercase ( self : Optional[Any] ):
snake_case__, snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
snake_case__ : Any = input_dict["input_ids"]
snake_case__ : int = input_ids.ne(1 ).to(__A )
snake_case__ : List[Any] = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
if torch_device == "cuda":
model.half()
model.generate(__A , attention_mask=__A )
model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
return torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ )
__lowerCamelCase : Optional[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : str ):
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def _lowercase ( self : Optional[int] ):
snake_case__ : List[str] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : str = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : str = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : Optional[Any] = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
# change to intended input
snake_case__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : List[str] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : List[str] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
snake_case__ : List[Any] = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The test below checks that we don't add any hypotheses outside of the top n_beams
snake_case__ : str = tokenizer(__A , padding=__A , return_tensors="pt" )
snake_case__ : Tuple = model.generate(
input_ids=dct["input_ids"].to(__A ) , attention_mask=dct["attention_mask"].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
snake_case__ : List[str] = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
snake_case__ : Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A )
assert generated == expected_en
| 25
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : str = tempfile.mkdtemp()
# fmt: off
_lowercase : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
_lowercase : List[Any] = dict(zip(lowerCamelCase, range(len(lowerCamelCase))))
_lowercase : Tuple = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
_lowercase : Any = {'unk_token': '<unk>'}
_lowercase : List[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
_lowercase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as fp:
fp.write(json.dumps(lowerCamelCase) + '\n')
with open(self.merges_file, 'w', encoding='utf-8') as fp:
fp.write('\n'.join(lowerCamelCase))
_lowercase : Optional[Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
_lowercase : Optional[int] = os.path.join(self.tmpdirname, lowerCamelCase)
with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
json.dump(lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self, **lowerCamelCase) -> int:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase)
def UpperCamelCase ( self, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **lowerCamelCase)
def UpperCamelCase ( self, **lowerCamelCase) -> Any:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Any = [np.random.randint(2_55, size=(3, 30, 4_00), dtype=np.uinta)]
_lowercase : Union[str, Any] = [Image.fromarray(np.moveaxis(lowerCamelCase, 0, -1)) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = self.get_tokenizer()
_lowercase : Dict = self.get_rust_tokenizer()
_lowercase : Dict = self.get_image_processor()
_lowercase : Tuple = CLIPSegProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase)
processor_slow.save_pretrained(self.tmpdirname)
_lowercase : Dict = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase)
_lowercase : List[Any] = CLIPSegProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase)
processor_fast.save_pretrained(self.tmpdirname)
_lowercase : str = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, lowerCamelCase)
self.assertIsInstance(processor_fast.tokenizer, lowerCamelCase)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, lowerCamelCase)
self.assertIsInstance(processor_fast.image_processor, lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Tuple = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowercase : str = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
_lowercase : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0)
_lowercase : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowerCamelCase, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, lowerCamelCase)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = self.get_image_processor()
_lowercase : int = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPSegProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase)
_lowercase : Optional[int] = self.prepare_image_inputs()
_lowercase : str = image_processor(lowerCamelCase, return_tensors='np')
_lowercase : Optional[int] = processor(images=lowerCamelCase, return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Tuple = self.get_image_processor()
_lowercase : Optional[int] = self.get_tokenizer()
_lowercase : int = CLIPSegProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase)
_lowercase : str = 'lower newer'
_lowercase : Dict = processor(text=lowerCamelCase)
_lowercase : Union[str, Any] = tokenizer(lowerCamelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Dict = self.get_image_processor()
_lowercase : Tuple = self.get_tokenizer()
_lowercase : int = CLIPSegProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase)
_lowercase : List[Any] = 'lower newer'
_lowercase : Any = self.prepare_image_inputs()
_lowercase : Any = processor(text=lowerCamelCase, images=lowerCamelCase)
self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase):
processor()
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = self.get_image_processor()
_lowercase : Any = self.get_tokenizer()
_lowercase : Tuple = CLIPSegProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase)
_lowercase : str = self.prepare_image_inputs()
_lowercase : str = self.prepare_image_inputs()
_lowercase : Union[str, Any] = processor(images=lowerCamelCase, visual_prompt=lowerCamelCase)
self.assertListEqual(list(inputs.keys()), ['pixel_values', 'conditional_pixel_values'])
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase):
processor()
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[str] = self.get_image_processor()
_lowercase : Dict = self.get_tokenizer()
_lowercase : List[Any] = CLIPSegProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase)
_lowercase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : Optional[int] = processor.batch_decode(lowerCamelCase)
_lowercase : List[str] = tokenizer.batch_decode(lowerCamelCase)
self.assertListEqual(lowerCamelCase, lowerCamelCase)
| 89
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
lowercase = """naver-clova-ix/donut-base-finetuned-docvqa"""
lowercase = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
lowercase = """document_qa"""
lowercase = AutoProcessor
lowercase = VisionEncoderDecoderModel
lowercase = ["""image""", """text"""]
lowercase = ["""text"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
UpperCamelCase = task_prompt.replace("{user_input}" , SCREAMING_SNAKE_CASE )
UpperCamelCase = self.pre_processor.tokenizer(
SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_tensors="pt" ).input_ids
UpperCamelCase = self.pre_processor(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self.model.generate(
inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=SCREAMING_SNAKE_CASE , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=SCREAMING_SNAKE_CASE , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=SCREAMING_SNAKE_CASE , ).sequences
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase = self.pre_processor.batch_decode(SCREAMING_SNAKE_CASE )[0]
UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
UpperCamelCase = re.sub(R"<.*?>" , "" , SCREAMING_SNAKE_CASE , count=1 ).strip() # remove first task start token
UpperCamelCase = self.pre_processor.tokenajson(SCREAMING_SNAKE_CASE )
return sequence["answer"]
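# Hedged usage sketch, for illustration only (not part of the original tool). The call flow
# is: encode() builds the Donut task prompt and pixel values, forward() runs constrained
# generation, decode() strips special tokens and parses the answer. The keyword names below
# follow the upstream DocumentQuestionAnsweringTool signature and the image path is a
# placeholder assumption:
#
#   from PIL import Image
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(document=Image.open("invoice.png"), question="What is the total amount?")
#   print(answer)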
| 606
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class lowerCAmelCase_ ( lowercase_ ):
__a : torch.FloatTensor
__a : torch.FloatTensor
__a : Optional[torch.FloatTensor] = None
class lowerCAmelCase_ ( lowercase_ , lowercase_ ):
__a : List[Any] = 2
@register_to_config
def __init__( self ,snake_case__ = 0.02 ,snake_case__ = 100 ,snake_case__ = 1.007 ,snake_case__ = 80 ,snake_case__ = 0.05 ,snake_case__ = 50 ,):
SCREAMING_SNAKE_CASE_ : Tuple = sigma_max
# setable values
SCREAMING_SNAKE_CASE_ : str = None
SCREAMING_SNAKE_CASE_ : str = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = None # sigma(t_i)
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
return sample
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Tuple = num_inference_steps
SCREAMING_SNAKE_CASE_ : List[Any] = np.arange(0 ,self.num_inference_steps )[::-1].copy()
SCREAMING_SNAKE_CASE_ : str = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor(UpperCamelCase__ ,dtype=torch.floataa ,device=UpperCamelCase__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ = None ):
if self.config.s_min <= sigma <= self.config.s_max:
SCREAMING_SNAKE_CASE_ : Dict = min(self.config.s_churn / self.num_inference_steps ,2**0.5 - 1 )
else:
SCREAMING_SNAKE_CASE_ : Dict = 0
# sample eps ~ N(0, S_noise^2 * I)
SCREAMING_SNAKE_CASE_ : Any = self.config.s_noise * randn_tensor(sample.shape ,generator=UpperCamelCase__ ).to(sample.device )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sigma + gamma * sigma
SCREAMING_SNAKE_CASE_ : Optional[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = True ,):
SCREAMING_SNAKE_CASE_ : List[str] = sample_hat + sigma_hat * model_output
SCREAMING_SNAKE_CASE_ : Optional[Any] = (sample_hat - pred_original_sample) / sigma_hat
SCREAMING_SNAKE_CASE_ : str = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=UpperCamelCase__ ,derivative=UpperCamelCase__ ,pred_original_sample=UpperCamelCase__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = True ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sample_prev + sigma_prev * model_output
SCREAMING_SNAKE_CASE_ : int = (sample_prev - pred_original_sample) / sigma_prev
SCREAMING_SNAKE_CASE_ : List[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=UpperCamelCase__ ,derivative=UpperCamelCase__ ,pred_original_sample=UpperCamelCase__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
raise NotImplementedError()
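# Hedged sketch, for illustration only: how the scheduler methods above are chained in a
# Karras et al. Algorithm-2 predictor-corrector loop. The names follow the public diffusers
# API (KarrasVeScheduler / KarrasVePipeline) and are assumptions here; the model-input
# scaling used by the actual pipeline is omitted for brevity.
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = sigma_max * torch.randn(shape)
#   for t in scheduler.timesteps:                                    # high noise -> low noise
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)   # stochastic churn
#       model_output = model(sample_hat, sigma_hat).sample                     # predictor (Euler) step
#       output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#       if sigma_prev != 0:                                                    # corrector (Heun) step
#           model_output = model(output.prev_sample, sigma_prev).sample
#           output = scheduler.step_correct(model_output, sigma_hat, sigma_prev,
#                                           sample_hat, output.prev_sample, output.derivative)
#       sample = output.prev_sample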
| 711
|
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE_ : Tuple = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
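# Illustrative expected behaviour of the function above: it shifts the input right until it
# reaches zero, so it returns the 1-based position of the most significant set bit
# (i.e. the number of binary digits):
#   1 (0b1)   -> 1
#   6 (0b110) -> 3
#   0         -> 0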
| 685
| 0
|
import math
def _SCREAMING_SNAKE_CASE ( snake_case ) -> bool:
return math.sqrt(snake_case ) * math.sqrt(snake_case ) == num
def _SCREAMING_SNAKE_CASE ( snake_case ) -> bool:
_UpperCAmelCase = 0
_UpperCAmelCase = n
while left <= right:
_UpperCAmelCase = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
_UpperCAmelCase = mid - 1
else:
_UpperCAmelCase = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
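# Worked trace of the binary-search variant above for n = 16 (illustrative only):
#   left=0, right=16 -> mid=8,  64 > 16 -> right=7
#   left=0, right=7  -> mid=3,   9 < 16 -> left=4
#   left=4, right=7  -> mid=5,  25 > 16 -> right=4
#   left=4, right=4  -> mid=4,  16 == 16 -> True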
| 518
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
a = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def _SCREAMING_SNAKE_CASE ( snake_case ) -> str:
for pegasus_name, hf_name in PATTERNS:
_UpperCAmelCase = k.replace(snake_case , snake_case )
return k
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> PegasusForConditionalGeneration:
_UpperCAmelCase = DEFAULTS.copy()
cfg_kwargs.update(snake_case )
_UpperCAmelCase = PegasusConfig(**snake_case )
_UpperCAmelCase = PegasusForConditionalGeneration(snake_case )
_UpperCAmelCase = torch_model.model.state_dict()
_UpperCAmelCase = {}
for k, v in tf_weights.items():
_UpperCAmelCase = rename_state_dict_key(snake_case )
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if "dense" in k or "proj" in new_k:
_UpperCAmelCase = v.T
_UpperCAmelCase = torch.tensor(snake_case , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
_UpperCAmelCase = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
_UpperCAmelCase = mapping["""shared.weight"""]
_UpperCAmelCase = mapping["""shared.weight"""]
_UpperCAmelCase = {k: torch.zeros_like(snake_case ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**snake_case )
_UpperCAmelCase , _UpperCAmelCase = torch_model.model.load_state_dict(snake_case , strict=snake_case )
_UpperCAmelCase = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def _SCREAMING_SNAKE_CASE ( snake_case="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
_UpperCAmelCase = tf.train.list_variables(snake_case )
_UpperCAmelCase = {}
_UpperCAmelCase = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(snake_case , desc="""converting tf checkpoint to dict""" ):
_UpperCAmelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCAmelCase = tf.train.load_variable(snake_case , snake_case )
_UpperCAmelCase = array
return tf_weights
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> List[str]:
# save tokenizer first
_UpperCAmelCase = Path(snake_case ).parent.name
_UpperCAmelCase = task_specific_params[f"summarization_{dataset}"]["""max_position_embeddings"""]
_UpperCAmelCase = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=snake_case )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(snake_case )
# convert model
_UpperCAmelCase = get_tf_weights_as_numpy(snake_case )
_UpperCAmelCase = task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
_UpperCAmelCase = task_specific_params
_UpperCAmelCase = convert_pegasus(snake_case , snake_case )
torch_model.save_pretrained(snake_case )
_UpperCAmelCase = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(snake_case , Path(snake_case ) / """pytorch_model.bin""" )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
a = parser.parse_args()
if args.save_dir is None:
a = Path(args.tf_ckpt_path).parent.name
a = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
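# Worked example of the key renaming driven by the substitution table at the top of this
# file. The TF key below is a hypothetical illustration, not taken from a real checkpoint:
#   "encoder/layer_0/self_attention/output_proj/kernel"
#     applying "attention" -> "attn", "/" -> ".", "r.layer_" -> "r.layers.",
#              "output_proj" -> "out_proj", "kernel" -> "weight"
#     gives "encoder.layers.0.self_attn.out_proj.weight"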
| 518
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
a__ = """dinat"""
a__ = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , lowercase=4 , lowercase=3 , lowercase=64 , lowercase=[3, 4, 6, 5] , lowercase=[2, 4, 8, 16] , lowercase=7 , lowercase=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowercase=3.0 , lowercase=True , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase="gelu" , lowercase=0.02 , lowercase=1e-5 , lowercase=0.0 , lowercase=None , lowercase=None , **lowercase , ) -> List[str]:
'''simple docstring'''
super().__init__(**lowercase)
a__: int = patch_size
a__: str = num_channels
a__: str = embed_dim
a__: Dict = depths
a__: int = len(lowercase)
a__: str = num_heads
a__: List[Any] = kernel_size
a__: Union[str, Any] = dilations
a__: List[str] = mlp_ratio
a__: Tuple = qkv_bias
a__: Union[str, Any] = hidden_dropout_prob
a__: List[Any] = attention_probs_dropout_prob
a__: Dict = drop_path_rate
a__: Union[str, Any] = hidden_act
a__: Any = layer_norm_eps
a__: Any = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
a__: Any = int(embed_dim * 2 ** (len(lowercase) - 1))
a__: Any = layer_scale_init_value
a__: Tuple = ['stem'] + [f'stage{idx}' for idx in range(1 , len(lowercase) + 1)]
a__: Tuple = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names)
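# Note (illustrative): with the defaults above (embed_dim=64 and depths=[3, 4, 6, 5], i.e.
# four stages), the derived hidden_size is int(64 * 2 ** (4 - 1)) = 512, the channel
# dimension after the last stage.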
| 716
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class __snake_case ( __lowerCAmelCase ):
a__ = """deit"""
def __init__( self , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1e-12 , lowercase=2_24 , lowercase=16 , lowercase=3 , lowercase=True , lowercase=16 , **lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**lowercase)
a__: Optional[int] = hidden_size
a__: List[Any] = num_hidden_layers
a__: Optional[Any] = num_attention_heads
a__: Union[str, Any] = intermediate_size
a__: Optional[Any] = hidden_act
a__: Optional[Any] = hidden_dropout_prob
a__: List[Any] = attention_probs_dropout_prob
a__: List[str] = initializer_range
a__: Optional[int] = layer_norm_eps
a__: Dict = image_size
a__: Dict = patch_size
a__: List[Any] = num_channels
a__: Optional[Any] = qkv_bias
a__: List[Any] = encoder_stride
class __snake_case ( __lowerCAmelCase ):
a__ = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def lowerCamelCase_ ( self) -> float:
'''simple docstring'''
return 1e-4
| 217
| 0
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCAmelCase ( __A , __A , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionDiffEditPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""}
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""}
_lowerCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCamelCase = frozenset([] )
def snake_case_ ( self ):
torch.manual_seed(0 )
__a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__A , )
__a = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
__a = DDIMInverseScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_zero=__A , )
torch.manual_seed(0 )
__a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
__a = CLIPTextModel(__A )
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__a = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def snake_case_ ( self , __A , __A=0 ):
__a = floats_tensor((1, 16, 16) , rng=random.Random(__A ) ).to(__A )
__a = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__A ) ).to(__A )
if str(__A ).startswith("""mps""" ):
__a = torch.manual_seed(__A )
else:
__a = torch.Generator(device=__A ).manual_seed(__A )
__a = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def snake_case_ ( self , __A , __A=0 ):
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A )
__a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" )
if str(__A ).startswith("""mps""" ):
__a = torch.manual_seed(__A )
else:
__a = torch.Generator(device=__A ).manual_seed(__A )
__a = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def snake_case_ ( self , __A , __A=0 ):
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A )
__a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" )
if str(__A ).startswith("""mps""" ):
__a = torch.manual_seed(__A )
else:
__a = torch.Generator(device=__A ).manual_seed(__A )
__a = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def snake_case_ ( self ):
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
__a = self.get_dummy_components()
__a = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__A , __A , __A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
__a = self.get_dummy_inputs(__A )
__a = pipe(**__A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__A )
__a = self.pipeline_class.from_pretrained(__A )
pipe_loaded.to(__A )
pipe_loaded.set_progress_bar_config(disable=__A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__A , __A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
__a = self.get_dummy_inputs(__A )
__a = pipe_loaded(**__A )[0]
__a = np.abs(output - output_loaded ).max()
self.assertLess(__A , 1E-4 )
def snake_case_ ( self ):
__a = """cpu"""
__a = self.get_dummy_components()
__a = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__a = self.get_dummy_mask_inputs(__A )
__a = pipe.generate_mask(**__A )
__a = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
__a = np.array([0] * 9 )
__a = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__A , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def snake_case_ ( self ):
__a = """cpu"""
__a = self.get_dummy_components()
__a = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__a = self.get_dummy_inversion_inputs(__A )
__a = pipe.invert(**__A ).images
__a = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__a = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
__a = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__A , 1E-3 )
def snake_case_ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def snake_case_ ( self ):
__a = """cpu"""
__a = self.get_dummy_components()
__a = {"""beta_start""": 0.00085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
__a = DPMSolverMultistepScheduler(**__A )
__a = DPMSolverMultistepInverseScheduler(**__A )
__a = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__a = self.get_dummy_inversion_inputs(__A )
__a = pipe.invert(**__A ).images
__a = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__a = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
__a = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__A , 1E-3 )
@require_torch_gpu
@slow
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def snake_case_ ( cls ):
__a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
__a = raw_image.convert("""RGB""" ).resize((768, 768) )
__a = raw_image
def snake_case_ ( self ):
__a = torch.manual_seed(0 )
__a = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=__A , torch_dtype=torch.floataa )
__a = DDIMScheduler.from_config(pipe.scheduler.config )
__a = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__A )
__a = """a bowl of fruit"""
__a = """a bowl of pears"""
__a = pipe.generate_mask(
image=self.raw_image , source_prompt=__A , target_prompt=__A , generator=__A , )
__a = pipe.invert(
prompt=__A , image=self.raw_image , inpaint_strength=0.7 , generator=__A ).latents
__a = pipe(
prompt=__A , mask_image=__A , image_latents=__A , generator=__A , negative_prompt=__A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
__a = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def snake_case_ ( self ):
__a = torch.manual_seed(0 )
__a = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=__A , torch_dtype=torch.floataa )
__a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__a = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__A )
__a = """a bowl of fruit"""
__a = """a bowl of pears"""
__a = pipe.generate_mask(
image=self.raw_image , source_prompt=__A , target_prompt=__A , generator=__A , )
__a = pipe.invert(
prompt=__A , image=self.raw_image , inpaint_strength=0.7 , generator=__A , num_inference_steps=25 , ).latents
__a = pipe(
prompt=__A , mask_image=__A , image_latents=__A , generator=__A , negative_prompt=__A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
__a = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 99
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__snake_case = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
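# Hedged sketch: the _LazyModule replacement above defers the heavy torch/flax imports until
# an attribute is first accessed. A stripped-down version of the same idea using a PEP 562
# module-level __getattr__ (illustrative only, not transformers' actual implementation):
#
#   import importlib
#   def __getattr__(name):
#       for submodule, names in _import_structure.items():
#           if name in names or name == submodule:
#               module = importlib.import_module(f".{submodule}", __name__)
#               return module if name == submodule else getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")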
| 189
| 0
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Optional[int] = logging.get_logger()
@dataclass
class UpperCAmelCase__ :
a : List[Any] = 4_2
a : Optional[int] = field(default_factory=UpperCamelCase__ )
a : Union[str, Any] = field(default_factory=UpperCamelCase__ )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
__lowerCAmelCase = len(list(m.modules() ) ) == 1 or isinstance(UpperCAmelCase__ , nn.Convad ) or isinstance(UpperCAmelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(UpperCAmelCase__ )
def __call__( self , UpperCamelCase ) -> Dict:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def UpperCAmelCase_ ( self ) -> Any:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda UpperCamelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCAmelCase__ :
a : str = 4_2
a : List[str] = 4_2
a : Tuple = 1
a : int = field(default_factory=UpperCamelCase__ )
a : List[str] = field(default_factory=UpperCamelCase__ )
a : Union[str, Any] = True
def __call__( self , UpperCamelCase ) -> List[Any]:
__lowerCAmelCase = Tracker(self.dest )(UpperCAmelCase__ ).parametrized
__lowerCAmelCase = Tracker(self.src )(UpperCAmelCase__ ).parametrized
__lowerCAmelCase = list(filter(lambda UpperCamelCase : type(UpperCAmelCase__ ) not in self.src_skip , UpperCAmelCase__ ) )
__lowerCAmelCase = list(filter(lambda UpperCamelCase : type(UpperCAmelCase__ ) not in self.dest_skip , UpperCAmelCase__ ) )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ) and self.raise_if_mismatch:
raise Exception(
F'''Numbers of operations are different. Source module has {len(UpperCAmelCase__ )} operations while'''
F''' destination module has {len(UpperCAmelCase__ )}.''' )
for dest_m, src_m in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transferred from={src_m} to={dest_m}''' )
class UpperCAmelCase__ ( nn.Module ):
def __init__( self , UpperCamelCase ) -> List[str]:
super().__init__()
__lowerCAmelCase = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), F'''Unexpected layer name {k}'''
__lowerCAmelCase = len(UpperCAmelCase__ ) + 1
feature_blocks.append((F'''res{block_index}''', v) )
__lowerCAmelCase = nn.ModuleDict(UpperCAmelCase__ )
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[Any]:
return get_trunk_forward_outputs(
UpperCAmelCase__ , out_feat_keys=UpperCAmelCase__ , feature_blocks=self._feature_blocks , )
class UpperCAmelCase__ ( UpperCamelCase__ ):
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
__lowerCAmelCase = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self , UpperCamelCase ) -> Callable[[], Tuple[nn.Module, Dict]]:
# default to timm!
if x not in self:
__lowerCAmelCase = self.convert_name_to_timm(UpperCAmelCase__ )
__lowerCAmelCase = partial(lambda: (timm.create_model(UpperCAmelCase__ , pretrained=UpperCAmelCase__ ).eval(), None) )
else:
__lowerCAmelCase = super().__getitem__(UpperCAmelCase__ )
return val
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __getitem__( self , UpperCamelCase ) -> Callable[[], nn.Module]:
if "seer" in x and "in1k" not in x:
__lowerCAmelCase = RegNetModel
else:
__lowerCAmelCase = RegNetForImageClassification
return val
def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : List[Any] , lowerCamelCase : List[Tuple[str, str]] ):
'''simple docstring'''
for from_key, to_key in keys:
__lowerCAmelCase = from_state_dict[from_key].clone()
print(f'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Callable[[], nn.Module] , lowerCamelCase : Callable[[], nn.Module] , lowerCamelCase : RegNetConfig , lowerCamelCase : Path , lowerCamelCase : bool = True , ):
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
__lowerCAmelCase , __lowerCAmelCase = from_model_func()
__lowerCAmelCase = our_model_func(_UpperCamelCase ).eval()
__lowerCAmelCase = ModuleTransfer(src=_UpperCamelCase , dest=_UpperCamelCase , raise_if_mismatch=_UpperCamelCase )
__lowerCAmelCase = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(_UpperCamelCase )
if from_state_dict is not None:
__lowerCAmelCase = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
__lowerCAmelCase = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
__lowerCAmelCase = manually_copy_vissl_head(_UpperCamelCase , our_model.state_dict() , _UpperCamelCase )
our_model.load_state_dict(_UpperCamelCase )
__lowerCAmelCase = our_model(_UpperCamelCase , output_hidden_states=_UpperCamelCase )
__lowerCAmelCase = (
our_outputs.logits if isinstance(_UpperCamelCase , _UpperCamelCase ) else our_outputs.last_hidden_state
)
__lowerCAmelCase = from_model(_UpperCamelCase )
__lowerCAmelCase = from_output[-1] if type(_UpperCamelCase ) is list else from_output
# since we don't use any config files here, the vissl seer model doesn't actually have a head, so just check the last hidden state
if "seer" in name and "in1k" in name:
__lowerCAmelCase = our_outputs.hidden_states[-1]
assert torch.allclose(_UpperCamelCase , _UpperCamelCase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=_UpperCamelCase , )
__lowerCAmelCase = 2_24 if "seer" not in name else 3_84
# we can use the convnext one
__lowerCAmelCase = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=_UpperCamelCase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=_UpperCamelCase , )
print(f'''Pushed {name}''' )
def __lowerCAmelCase ( lowerCamelCase : Path , lowerCamelCase : str = None , lowerCamelCase : bool = True ):
'''simple docstring'''
__lowerCAmelCase = "imagenet-1k-id2label.json"
__lowerCAmelCase = 10_00
__lowerCAmelCase = (1, num_labels)
__lowerCAmelCase = "huggingface/label-files"
__lowerCAmelCase = num_labels
__lowerCAmelCase = json.load(open(cached_download(hf_hub_url(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" ) ) , "r" ) )
__lowerCAmelCase = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = partial(_UpperCamelCase , num_labels=_UpperCamelCase , idalabel=_UpperCamelCase , labelaid=_UpperCamelCase )
__lowerCAmelCase = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
__lowerCAmelCase = NameToOurModelFuncMap()
__lowerCAmelCase = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(lowerCamelCase : str , lowerCamelCase : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
__lowerCAmelCase = torch.hub.load_state_dict_from_url(_UpperCamelCase , model_dir=str(_UpperCamelCase ) , map_location="cpu" )
__lowerCAmelCase = model_func()
# check if we have a head, if yes add it
__lowerCAmelCase = files["classy_state_dict"]["base_model"]["model"]
__lowerCAmelCase = model_state_dict["trunk"]
model.load_state_dict(_UpperCamelCase )
return model.eval(), model_state_dict["heads"]
# pretrained
__lowerCAmelCase = partial(
_UpperCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__lowerCAmelCase = partial(
_UpperCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__lowerCAmelCase = partial(
_UpperCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__lowerCAmelCase = partial(
_UpperCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=10_10 , w_a=17_44 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
# IN1K finetuned
__lowerCAmelCase = partial(
_UpperCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__lowerCAmelCase = partial(
_UpperCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__lowerCAmelCase = partial(
_UpperCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__lowerCAmelCase = partial(
_UpperCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=10_10 , w_a=17_44 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
if model_name:
convert_weight_and_push(
_UpperCamelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _UpperCamelCase , _UpperCamelCase , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
_UpperCamelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
lowerCAmelCase : Union[str, Any] = parser.parse_args()
lowerCAmelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 720
|
'''simple docstring'''
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCAmelCase = 1
__lowerCAmelCase = 2
while i * i <= n:
__lowerCAmelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = 1
__lowerCAmelCase = 1
while True:
i += 1
t_num += i
if count_divisors(lowerCamelCase ) > 5_00:
break
return t_num
if __name__ == "__main__":
print(solution())
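# Worked example of the divisor-counting idea used above: for n = p1**a1 * p2**a2 * ... the
# divisor count is (a1 + 1) * (a2 + 1) * ... . For instance 28 = 2**2 * 7 gives
# (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28. solution() returns the first
# triangular number whose divisor count exceeds 500.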
| 39
| 0
|
"""simple docstring"""
import random
def partition(a, left_index, right_index):
    '''Partition a[left_index:right_index] around a[left_index]; return the pivot's final index.'''
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    '''Randomised quicksort on the half-open range a[left:right].'''
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = a[pivot], a[left]  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    '''Read comma-separated integers from stdin, sort them and print the result.'''
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    numbers = [int(item) for item in user_input.split(""",""")]
    quick_sort_random(numbers, 0, len(numbers))
    print(numbers)
if __name__ == "__main__":
main()
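# Added usage sketch (not part of the original file): sorting a fixed list instead of
# reading from stdin; the sample data is made up.
#
#   data = [9, 3, 7, 1, 5]
#   quick_sort_random(data, 0, len(data))
#   print(data)  # [1, 3, 5, 7, 9]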
| 673
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location='cpu')
    args = mam_aaa['args'] or mam_aaa['cfg']['model']
    state_dict = mam_aaa['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
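    # Added usage sketch (not part of the original script; the script file name and paths are
    # hypothetical placeholders):
    #
    #   python convert_m2m100_checkpoint_to_pytorch.py /path/to/fairseq/model.pt ./m2m100-converted
    #
    # The first positional argument is fairseq_path (a fairseq model.pt on disk) and the second is
    # pytorch_dump_folder_path, matching the argparse arguments declared above.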
| 101
| 0
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _a ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def _a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
lowerCamelCase_ : str = tmp_path / 'cache'
lowerCamelCase_ : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase_ : Any = SqlDatasetReader(
'dataset' , 'sqlite:///' + sqlite_path , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ ).read()
_check_sql_dataset(lowerCamelCase__ , lowerCamelCase__ )
@require_sqlalchemy
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def _a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
lowerCamelCase_ : int = tmp_path / 'cache'
lowerCamelCase_ : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase_ : Optional[Any] = features.copy() if features else default_expected_features
lowerCamelCase_ : str = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase_ : Optional[Any] = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_sql_dataset(lowerCamelCase__ , lowerCamelCase__ )
def _a ( lowerCamelCase__ ) -> List[str]:
    with contextlib.closing(sqlite3.connect(lowerCamelCase__ ) ) as con:
lowerCamelCase_ : Tuple = con.cursor()
cur.execute('SELECT * FROM dataset' )
for row in cur:
yield row
@require_sqlalchemy
def _a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
lowerCamelCase_ : str = tmp_path / 'cache'
lowerCamelCase_ : Optional[Any] = os.path.join(lowerCamelCase__ , 'tmp.sql' )
lowerCamelCase_ : List[Any] = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=lowerCamelCase__ ).read()
SqlDatasetWriter(lowerCamelCase__ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=1 ).write()
lowerCamelCase_ : Union[str, Any] = iter_sql_file(lowerCamelCase__ )
lowerCamelCase_ : Optional[int] = iter_sql_file(lowerCamelCase__ )
for rowa, rowa in zip(lowerCamelCase__ , lowerCamelCase__ ):
assert rowa == rowa
@require_sqlalchemy
def _a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
lowerCamelCase_ : Optional[int] = tmp_path / 'cache'
lowerCamelCase_ : Tuple = os.path.join(lowerCamelCase__ , 'tmp.sql' )
lowerCamelCase_ : Optional[Any] = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=lowerCamelCase__ ).read()
SqlDatasetWriter(lowerCamelCase__ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=2 ).write()
lowerCamelCase_ : Optional[Any] = iter_sql_file(lowerCamelCase__ )
lowerCamelCase_ : Any = iter_sql_file(lowerCamelCase__ )
for rowa, rowa in zip(lowerCamelCase__ , lowerCamelCase__ ):
assert rowa == rowa
@require_sqlalchemy
def _a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
lowerCamelCase_ : Optional[int] = tmp_path / 'cache'
lowerCamelCase_ : int = os.path.join(lowerCamelCase__ , 'tmp.sql' )
lowerCamelCase_ : int = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=lowerCamelCase__ ).read()
with pytest.raises(lowerCamelCase__ ):
SqlDatasetWriter(lowerCamelCase__ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=0 ).write()
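# Added standalone sketch of the reader/writer API exercised by these tests, assuming the
# `datasets` and `sqlalchemy` packages are installed; the file names are made up.
#
#   from datasets import Dataset
#   from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
#
#   ds = Dataset.from_dict({"col_1": ["0", "1"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
#   SqlDatasetWriter(ds, "dataset", "sqlite:///example.db", num_proc=1).write()
#   round_trip = SqlDatasetReader("dataset", "sqlite:///example.db", cache_dir="./cache").read()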
| 144
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase = {
'''google/realm-cc-news-pretrained-embedder''': 5_1_2,
'''google/realm-cc-news-pretrained-encoder''': 5_1_2,
'''google/realm-cc-news-pretrained-scorer''': 5_1_2,
'''google/realm-cc-news-pretrained-openqa''': 5_1_2,
'''google/realm-orqa-nq-openqa''': 5_1_2,
'''google/realm-orqa-nq-reader''': 5_1_2,
'''google/realm-orqa-wq-openqa''': 5_1_2,
'''google/realm-orqa-wq-reader''': 5_1_2,
}
UpperCamelCase = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class lowerCamelCase__ ( UpperCAmelCase ):
lowerCamelCase_ : Any = VOCAB_FILES_NAMES
lowerCamelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Tuple = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : Optional[Any] = RealmTokenizer
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self , text , **kwargs ) -> BatchEncoding:
        """Encode a batch of candidate texts, padding every candidate to max_length."""
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('text_pair' , None )
        return_tensors = kwargs.pop('return_tensors' , None )
        output_data = {
            'input_ids': [],
            'attention_mask': [],
            'token_type_ids': [],
        }
        for idx, candidate_text in enumerate(batch_text ):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs )
            encoded_input_ids = encoded_candidates.get('input_ids' )
            encoded_attention_mask = encoded_candidates.get('attention_mask' )
            encoded_token_type_ids = encoded_candidates.get('token_type_ids' )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids )
        output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
        return BatchEncoding(output_data , tensor_type=return_tensors )
    def build_inputs_with_special_tokens(self , token_ids_a , token_ids_b=None ) -> List[int]:
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
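# Added usage sketch (not part of the original module): loading one of the checkpoints listed in
# PRETRAINED_VOCAB_FILES_MAP with the fast tokenizer class defined above (RealmTokenizerFast in
# released transformers) and batch-encoding candidate passages; the sample sentences are made up.
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#   batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="np")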
| 144
| 1
|
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _a ( unittest.TestCase):
__magic_name__ = MODEL_FOR_MASKED_LM_MAPPING
__magic_name__ = TF_MODEL_FOR_MASKED_LM_MAPPING
def __lowercase ( self : List[str] ) -> Optional[int]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __lowercase ( self : Dict ) -> Any:
snake_case : Optional[Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
snake_case : Tuple = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1E-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1E-05, "token": 25506, "token_str": " accuser"},
] , )
snake_case : Union[str, Any] = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1E-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1E-05,
"token": 25506,
"token_str": " accuser",
},
] , )
snake_case : int = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2E-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2E-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9E-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __lowercase ( self : Dict ) -> Optional[Any]:
snake_case : List[str] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
snake_case : Optional[Any] = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2E-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2E-05, "token": 16416, "token_str": "ELS"},
] , )
snake_case : List[Any] = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2E-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2E-05, "token": 16416, "token_str": "ELS"},
] , )
snake_case : Optional[Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1E-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2E-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2E-05, "token": 13606, "token_str": " Clara"},
] , )
snake_case : Tuple = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
[
{
"score": 2.2E-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2E-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2E-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2E-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def __lowercase ( self : List[Any] ) -> Any:
snake_case : Optional[int] = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
snake_case : str = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_lowercase , _lowercase )
@slow
@require_torch
def __lowercase ( self : str ) -> List[str]:
snake_case : Union[str, Any] = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(_lowercase )
@slow
@require_tf
def __lowercase ( self : List[str] ) -> Tuple:
snake_case : Dict = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(_lowercase )
def __lowercase ( self : List[str] , _lowercase : str ) -> int:
snake_case : Union[str, Any] = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
snake_case : Dict = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_lowercase ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
snake_case : Union[str, Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __lowercase ( self : Tuple ) -> Optional[Any]:
snake_case : Optional[int] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
snake_case : Optional[Any] = None
snake_case : List[str] = None
self.run_pipeline_test(_lowercase , [] )
@require_tf
def __lowercase ( self : Tuple ) -> List[Any]:
snake_case : Union[str, Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
snake_case : Tuple = None
snake_case : str = None
self.run_pipeline_test(_lowercase , [] )
    def __lowercase ( self : List[Any] , model : List[Any] , tokenizer : int , processor : Optional[int] ) -> str:
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
        fill_masker : Union[str, Any] = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples : str = [
            F'''This is another {tokenizer.mask_token} test''',
        ]
        return fill_masker, examples
def __lowercase ( self : Union[str, Any] , _lowercase : List[str] , _lowercase : Union[str, Any] ) -> Tuple:
snake_case : Any = fill_masker.tokenizer
snake_case : List[Any] = fill_masker.model
snake_case : Union[str, Any] = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_lowercase , [
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
] , )
snake_case : str = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_lowercase , [
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
] , )
snake_case : Dict = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_lowercase , [
[
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
],
[
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
],
] , )
with self.assertRaises(_lowercase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_lowercase ):
fill_masker("This is" )
self.run_test_top_k(_lowercase , _lowercase )
self.run_test_targets(_lowercase , _lowercase )
self.run_test_top_k_targets(_lowercase , _lowercase )
self.fill_mask_with_duplicate_targets_and_top_k(_lowercase , _lowercase )
self.fill_mask_with_multiple_masks(_lowercase , _lowercase )
def __lowercase ( self : Tuple , _lowercase : List[Any] , _lowercase : Optional[Any] ) -> List[Any]:
snake_case : Any = tokenizer.get_vocab()
snake_case : Optional[int] = sorted(vocab.keys() )[:2]
# Pipeline argument
snake_case : Any = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase , targets=_lowercase )
snake_case : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_lowercase , [
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
] , )
snake_case : List[Any] = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , _lowercase )
snake_case : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(_lowercase ) )
# Call argument
snake_case : int = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
snake_case : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_lowercase )
self.assertEqual(
_lowercase , [
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
] , )
snake_case : str = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , _lowercase )
snake_case : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(_lowercase ) )
# Score equivalence
snake_case : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_lowercase )
snake_case : Union[str, Any] = [top_mask["token_str"] for top_mask in outputs]
snake_case : Dict = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowercase ) == set(_lowercase ):
snake_case : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_lowercase )
snake_case : Union[str, Any] = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
# Raises with invalid
with self.assertRaises(_lowercase ):
snake_case : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_lowercase ):
snake_case : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""] )
with self.assertRaises(_lowercase ):
snake_case : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="" )
def __lowercase ( self : Any , _lowercase : Optional[Any] , _lowercase : List[Any] ) -> Any:
snake_case : Optional[Any] = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase , top_k=2 )
snake_case : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_lowercase , [
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
] , )
snake_case : str = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
snake_case : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_lowercase , [
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
] , )
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
def __lowercase ( self : Optional[int] , _lowercase : int , _lowercase : List[Any] ) -> Any:
snake_case : Optional[Any] = tokenizer.get_vocab()
snake_case : str = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
# top_k=2, ntargets=3
snake_case : List[Any] = sorted(vocab.keys() )[:3]
snake_case : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_lowercase )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        snake_case : Optional[int] = [el["token_str"] for el in sorted(_lowercase , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowercase ).issubset(_lowercase ):
snake_case : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_lowercase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
def __lowercase ( self : Tuple , _lowercase : Dict , _lowercase : Union[str, Any] ) -> Optional[int]:
snake_case : Dict = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
snake_case : Optional[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
snake_case : List[str] = sorted(vocab.keys() )[:3]
snake_case : Any = [targets[0], targets[1], targets[0], targets[2], targets[1]]
snake_case : str = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_lowercase , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_lowercase ) , 3 )
def __lowercase ( self : str , _lowercase : str , _lowercase : List[Any] ) -> Dict:
snake_case : int = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
snake_case : Tuple = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_lowercase , [
[
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
],
[
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
],
[
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
{"sequence": ANY(_lowercase ), "score": ANY(_lowercase ), "token": ANY(_lowercase ), "token_str": ANY(_lowercase )},
],
] , )
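# Added quick-start sketch mirroring the calls used in these tests (downloads a tiny checkpoint,
# so it is left as a comment):
#
#   from transformers import pipeline
#   unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
#   unmasker("My name is <mask>")
#   unmasker("My name is <mask>", targets=[" Patrick", " Clara"], top_k=2)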
| 449
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
A = False
class _a ( unittest.TestCase):
def __lowercase ( self : str ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowercase ( self : List[str] ) -> Any:
return 12
@property
def __lowercase ( self : List[str] ) -> Dict:
return 12
@property
def __lowercase ( self : Optional[int] ) -> Union[str, Any]:
return 32
@property
def __lowercase ( self : int ) -> Dict:
torch.manual_seed(0 )
snake_case : List[str] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __lowercase ( self : str ) -> List[Any]:
snake_case : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def __lowercase ( self : Union[str, Any] ) -> Any:
torch.manual_seed(0 )
snake_case : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(_lowercase )
@property
def __lowercase ( self : Optional[int] ) -> Optional[int]:
torch.manual_seed(0 )
snake_case : List[Any] = 12
snake_case : Dict = 12
snake_case : Tuple = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
snake_case : Dict = TransformeraDModel(**_lowercase )
return model
def __lowercase ( self : Optional[int] ) -> Tuple:
snake_case : Optional[Any] = "cpu"
snake_case : Optional[int] = self.dummy_vqvae
snake_case : Dict = self.dummy_text_encoder
snake_case : Tuple = self.dummy_tokenizer
snake_case : List[Any] = self.dummy_transformer
snake_case : List[Any] = VQDiffusionScheduler(self.num_embed )
snake_case : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case : Dict = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case : Optional[Any] = "teddy bear playing in the pool"
snake_case : Tuple = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="np" )
snake_case : Optional[int] = output.images
snake_case : int = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : List[str] = pipe(
[prompt] , generator=_lowercase , output_type="np" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case : List[Any] = image[0, -3:, -3:, -1]
snake_case : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case : str = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : Union[str, Any] ) -> Optional[int]:
snake_case : List[str] = "cpu"
snake_case : Dict = self.dummy_vqvae
snake_case : List[Any] = self.dummy_text_encoder
snake_case : Optional[Any] = self.dummy_tokenizer
snake_case : int = self.dummy_transformer
snake_case : str = VQDiffusionScheduler(self.num_embed )
snake_case : Optional[int] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case : List[str] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case : Optional[Any] = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case : Dict = "teddy bear playing in the pool"
snake_case : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : Union[str, Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="np" )
snake_case : Optional[Any] = output.images
snake_case : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : Union[str, Any] = pipe(
[prompt] , generator=_lowercase , output_type="np" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case : Any = image[0, -3:, -3:, -1]
snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case : Optional[Any] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase):
def __lowercase ( self : Optional[int] ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Dict ) -> Tuple:
snake_case : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
snake_case : Tuple = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
snake_case : Union[str, Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case : Tuple = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : Optional[int] = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=_lowercase , output_type="np" , )
snake_case : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
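# Added usage sketch based on the slow test above (requires a CUDA GPU and downloads the
# microsoft/vq-diffusion-ithq weights, so it is left as a comment):
#
#   import torch
#   from diffusers import VQDiffusionPipeline
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq").to("cuda")
#   generator = torch.Generator(device="cuda").manual_seed(0)
#   image = pipe("teddy bear playing in the pool", generator=generator, output_type="np").images[0]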
| 449
| 1
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowercase__ = logging.get_logger(__name__)
enable_full_determinism()
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
snake_case = UNetaDModel
snake_case = """sample"""
@property
def _lowercase ( self ):
snake_case_ = 4
snake_case_ = 3
snake_case_ = (32, 32)
snake_case_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
snake_case_ = torch.tensor([10] ).to(UpperCAmelCase_ )
return {"sample": noise, "timestep": time_step}
@property
def _lowercase ( self ):
return (3, 32, 32)
@property
def _lowercase ( self ):
return (3, 32, 32)
def _lowercase ( self ):
snake_case_ = {
"block_out_channels": (32, 64),
"down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
"up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": 3,
"out_channels": 3,
"in_channels": 3,
"layers_per_block": 2,
"sample_size": 32,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
snake_case = UNetaDModel
snake_case = """sample"""
@property
def _lowercase ( self ):
snake_case_ = 4
snake_case_ = 4
snake_case_ = (32, 32)
snake_case_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
snake_case_ = torch.tensor([10] ).to(UpperCAmelCase_ )
return {"sample": noise, "timestep": time_step}
@property
def _lowercase ( self ):
return (4, 32, 32)
@property
def _lowercase ( self ):
return (4, 32, 32)
def _lowercase ( self ):
snake_case_ = {
"sample_size": 32,
"in_channels": 4,
"out_channels": 4,
"layers_per_block": 2,
"block_out_channels": (32, 64),
"attention_head_dim": 32,
"down_block_types": ("DownBlock2D", "DownBlock2D"),
"up_block_types": ("UpBlock2D", "UpBlock2D"),
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def _lowercase ( self ):
        model , loading_info = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def _lowercase ( self ):
        model , loading_info = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=True )
        model.to(torch_device )
        image = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def _lowercase ( self ):
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
snake_case_ , snake_case_ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase_ )
model_accelerate.to(UpperCAmelCase_ )
model_accelerate.eval()
snake_case_ = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ = noise.to(UpperCAmelCase_ )
snake_case_ = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase_ )
snake_case_ = model_accelerate(UpperCAmelCase_ , UpperCAmelCase_ )["sample"]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
snake_case_ , snake_case_ = UNetaDModel.from_pretrained(
"fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase_ , low_cpu_mem_usage=UpperCAmelCase_ )
model_normal_load.to(UpperCAmelCase_ )
model_normal_load.eval()
snake_case_ = model_normal_load(UpperCAmelCase_ , UpperCAmelCase_ )["sample"]
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1e-3 )
def _lowercase ( self ):
snake_case_ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
model.eval()
model.to(UpperCAmelCase_ )
snake_case_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ = noise.to(UpperCAmelCase_ )
snake_case_ = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ , UpperCAmelCase_ ).sample
snake_case_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case_ = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1e-3 ) )
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
snake_case = UNetaDModel
snake_case = """sample"""
@property
def _lowercase ( self , UpperCAmelCase_=(32, 32) ):
snake_case_ = 4
snake_case_ = 3
snake_case_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
snake_case_ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCAmelCase_ )
return {"sample": noise, "timestep": time_step}
@property
def _lowercase ( self ):
return (3, 32, 32)
@property
def _lowercase ( self ):
return (3, 32, 32)
def _lowercase ( self ):
snake_case_ = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1e-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
@slow
def _lowercase ( self ):
        model , loading_info = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(torch_device )
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256) ).to(torch_device )
        inputs["sample"] = noise
        image = model(**inputs )
assert image is not None, "Make sure output is not None"
@slow
def _lowercase ( self ):
snake_case_ = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
model.to(UpperCAmelCase_ )
snake_case_ = 4
snake_case_ = 3
snake_case_ = (2_56, 2_56)
snake_case_ = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
snake_case_ = torch.tensor(batch_size * [1e-4] ).to(UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ , UpperCAmelCase_ ).sample
snake_case_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1e-2 ) )
def _lowercase ( self ):
snake_case_ = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
model.to(UpperCAmelCase_ )
snake_case_ = 4
snake_case_ = 3
snake_case_ = (32, 32)
snake_case_ = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
snake_case_ = torch.tensor(batch_size * [1e-4] ).to(UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ , UpperCAmelCase_ ).sample
snake_case_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1e-2 ) )
def _lowercase ( self ):
# not required for this model
pass
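# Added sketch of a forward pass with the small config from the first test case above; the class
# is assumed to be UNet2DModel in released diffusers (its name is mangled in this file):
#
#   import torch
#   from diffusers import UNet2DModel
#   model = UNet2DModel(sample_size=32, in_channels=3, out_channels=3, layers_per_block=2,
#                       block_out_channels=(32, 64), attention_head_dim=3,
#                       down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#                       up_block_types=("AttnUpBlock2D", "UpBlock2D"))
#   sample = torch.randn(4, 3, 32, 32)
#   out = model(sample, timestep=10).sample  # same spatial shape as the input sample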
| 420
|
'''simple docstring'''
from manim import *
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def _lowercase ( self ):
snake_case_ = Rectangle(height=0.5 , width=0.5 )
snake_case_ = Rectangle(height=0.25 , width=0.25 )
snake_case_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("CPU" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
snake_case_ = [mem.copy() for i in range(4 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("GPU" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase_ )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("Model" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase_ )
snake_case_ = []
snake_case_ = []
snake_case_ = []
for i, rect in enumerate(UpperCAmelCase_ ):
rect.set_stroke(UpperCAmelCase_ )
snake_case_ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=UpperCAmelCase_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCAmelCase_ , buff=0.0 )
self.add(UpperCAmelCase_ )
model_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ , *UpperCAmelCase_ )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("Loaded Checkpoint" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(UpperCAmelCase_ )
snake_case_ = []
snake_case_ = []
for i, rect in enumerate(UpperCAmelCase_ ):
snake_case_ = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.7 )
target.move_to(UpperCAmelCase_ )
ckpt_arr.append(UpperCAmelCase_ )
snake_case_ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ )
snake_case_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase_ )
snake_case_ = MarkupText(
f'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
snake_case_ = [meta_mem.copy() for i in range(6 )]
snake_case_ = [meta_mem.copy() for i in range(6 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("Disk" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) , Write(UpperCAmelCase_ , run_time=1 ) , Create(UpperCAmelCase_ , run_time=1 ) )
snake_case_ = []
for i, rect in enumerate(UpperCAmelCase_ ):
snake_case_ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(UpperCAmelCase_ , run_time=1.5 ) )
self.play(*UpperCAmelCase_ )
self.play(FadeOut(UpperCAmelCase_ ) )
snake_case_ = MarkupText(f'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) )
self.play(
FadeOut(UpperCAmelCase_ , UpperCAmelCase_ , *UpperCAmelCase_ , *UpperCAmelCase_ ) , )
self.wait()
| 420
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xglm'] = [
        'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XGLMForCausalLM',
        'XGLMModel',
        'XGLMPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xglm'] = [
        'FlaxXGLMForCausalLM',
        'FlaxXGLMModel',
        'FlaxXGLMPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xglm'] = [
        'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXGLMForCausalLM',
        'TFXGLMModel',
        'TFXGLMPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
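# Added note (not in the original file): with the lazy module registered above, importing the
# package stays cheap and the heavy backends are only pulled in on attribute access, e.g.:
#
#   from transformers import XGLMConfig         # always available
#   from transformers import XGLMTokenizerFast  # requires `tokenizers`
#   from transformers import XGLMForCausalLM    # requires `torch`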
| 86
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__a :List[Any] = get_logger()
__a :Optional[dict] = None
class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : int=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[Any] ):
super().__init__(features=UpperCAmelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(UpperCAmelCase , UpperCAmelCase ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(UpperCAmelCase )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
A_ = device if isinstance(UpperCAmelCase , UpperCAmelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
A_ = str(jax.devices()[0] )
A_ = jnp_array_kwargs
@staticmethod
def __A ( ):
import jax
return {str(UpperCAmelCase ): device for device in jax.devices()}
def __A ( self : Optional[int] , UpperCAmelCase : int ):
import jax
import jax.numpy as jnp
if isinstance(UpperCAmelCase , UpperCAmelCase ) and column:
if all(
isinstance(UpperCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(UpperCAmelCase , axis=0 )
return column
def __A ( self : List[str] , UpperCAmelCase : str ):
import jax
import jax.numpy as jnp
if isinstance(UpperCAmelCase , (str, bytes, type(UpperCAmelCase )) ):
return value
elif isinstance(UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
A_ = {}
if isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                A_ = {"dtype": jnp.int64}
            else:
                A_ = {"dtype": jnp.int32}
        elif isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            A_ = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCAmelCase , PIL.Image.Image ):
A_ = np.asarray(UpperCAmelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def __A ( self : Any , UpperCAmelCase : Dict ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(UpperCAmelCase , "__array__" ) and not isinstance(UpperCAmelCase , jax.Array ):
A_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCAmelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] )
elif isinstance(UpperCAmelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] )
return self._tensorize(UpperCAmelCase )
def __A ( self : Tuple , UpperCAmelCase : dict ):
return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=UpperCAmelCase )
def __A ( self : Dict , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_row(UpperCAmelCase )
A_ = self.python_features_decoder.decode_row(UpperCAmelCase )
return self.recursive_tensorize(UpperCAmelCase )
def __A ( self : Any , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_column(UpperCAmelCase )
A_ = self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0] )
A_ = self.recursive_tensorize(UpperCAmelCase )
A_ = self._consolidate(UpperCAmelCase )
return column
def __A ( self : Dict , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase )
A_ = self.python_features_decoder.decode_batch(UpperCAmelCase )
A_ = self.recursive_tensorize(UpperCAmelCase )
for column_name in batch:
A_ = self._consolidate(batch[column_name] )
return batch
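# Hedged usage sketch (not part of this file): a formatter like the one above is normally selected
# through the public `datasets` API rather than instantiated directly, e.g.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#     ds[0]["x"]  # returned as a jax.Array on the default (or configured) device
#
# The column name "x" and the values are illustrative only.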
| 86
| 1
|
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _lowerCAmelCase ( __snake_case ):
def __init__( self : Any , *a : Union[str, Any] , a : Optional[Any]=None , a : Tuple=None , **a : str ) -> Dict:
"""simple docstring"""
super().__init__(*a , **a )
lowercase = eval_examples
lowercase = post_process_function
def _lowerCAmelCase ( self : int , a : str=None , a : List[str]=None , a : Union[str, Any]=None , a : str = "eval" ) -> int:
"""simple docstring"""
lowercase = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase = self.get_eval_dataloader(a )
lowercase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase = self.compute_metrics
lowercase = None
lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
lowercase = time.time()
try:
lowercase = eval_loop(
a , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a , metric_key_prefix=a , )
finally:
lowercase = compute_metrics
lowercase = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
a , a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase = self.post_process_function(a , a , output.predictions )
lowercase = self.compute_metrics(a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
lowercase = metrics.pop(a )
metrics.update(output.metrics )
else:
lowercase = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(a )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase = self.callback_handler.on_evaluate(self.args , self.state , self.control , a )
return metrics
def _lowerCAmelCase ( self : Optional[Any] , a : Any , a : int , a : Optional[int]=None , a : str = "test" ) -> Optional[int]:
"""simple docstring"""
lowercase = self.get_test_dataloader(a )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase = self.compute_metrics
lowercase = None
lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
lowercase = time.time()
try:
lowercase = eval_loop(
a , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a , metric_key_prefix=a , )
finally:
lowercase = compute_metrics
lowercase = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
a , a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase = self.post_process_function(a , a , output.predictions , '''predict''' )
lowercase = self.compute_metrics(a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
lowercase = metrics.pop(a )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=a )
| 396
|
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument('''--user''', type=str, default='''ubuntu''')
    parser.add_argument('''--host''', type=str, default='''localhost''')
    parser.add_argument('''--key_path''', type=str, default=None)
    parser.add_argument('''--instance''', type=str, default='''V100:1''')
    parser.add_argument('''--provider''', type=str, default='''cheapest''')
    parser.add_argument('''--use_spot''', type=bool, default=False)
    parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
            name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
        )
    else:
        cluster = rh.cluster(
            name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit('''/''', 1)[0]

    # Set up remote environment
    cluster.install_packages(['''pip:./'''])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
    cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 396
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''')

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32, )  # J'aime le camembert !
        output = model(input_ids)['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]], dtype=tf.float32, )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 97
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 118
| 0
|
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if no neighbour of the current vertex already uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to assign one of `max_colors` colors to vertex `index` and all vertices after it."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid vertex coloring using at most `max_colors` colors, or an empty list if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
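# A minimal usage sketch for the backtracking coloring above; the 4-vertex cycle below is
# purely illustrative.
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    # An even cycle is 2-colorable, so this prints an assignment such as [0, 1, 0, 1].
    print(color(demo_graph, 2))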
| 713
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class __snake_case ( unittest.TestCase):
'''simple docstring'''
def _a ( self ):
a__ = tempfile.mkdtemp()
# fmt: off
a__ = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
a__ = dict(zip(a_ , range(len(a_ ) ) ) )
a__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
a__ = {"""unk_token""": """<unk>"""}
a__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
a__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
a__ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
a__ = os.path.join(self.tmpdirname , a_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(a_ , a_ )
def _a ( self , **a_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **a_ )
def _a ( self , **a_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **a_ )
def _a ( self , **a_ ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **a_ )
def _a ( self ):
shutil.rmtree(self.tmpdirname )
def _a ( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def _a ( self ):
a__ = self.get_tokenizer()
a__ = self.get_rust_tokenizer()
a__ = self.get_image_processor()
a__ = OwlViTProcessor(tokenizer=a_ , image_processor=a_ )
processor_slow.save_pretrained(self.tmpdirname )
a__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=a_ )
a__ = OwlViTProcessor(tokenizer=a_ , image_processor=a_ )
processor_fast.save_pretrained(self.tmpdirname )
a__ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , a_ )
self.assertIsInstance(processor_fast.tokenizer , a_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , a_ )
self.assertIsInstance(processor_fast.image_processor , a_ )
def _a ( self ):
a__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
a__ = self.get_image_processor(do_normalize=a_ )
a__ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=a_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a_ )
def _a ( self ):
a__ = self.get_image_processor()
a__ = self.get_tokenizer()
a__ = OwlViTProcessor(tokenizer=a_ , image_processor=a_ )
a__ = self.prepare_image_inputs()
a__ = image_processor(a_ , return_tensors="""np""" )
a__ = processor(images=a_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ):
a__ = self.get_image_processor()
a__ = self.get_tokenizer()
a__ = OwlViTProcessor(tokenizer=a_ , image_processor=a_ )
a__ = """lower newer"""
a__ = processor(text=a_ , return_tensors="""np""" )
a__ = tokenizer(a_ , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _a ( self ):
a__ = self.get_image_processor()
a__ = self.get_tokenizer()
a__ = OwlViTProcessor(tokenizer=a_ , image_processor=a_ )
a__ = """lower newer"""
a__ = self.prepare_image_inputs()
a__ = processor(text=a_ , images=a_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def _a ( self ):
a__ = """google/owlvit-base-patch32"""
a__ = OwlViTProcessor.from_pretrained(a_ )
a__ = ["""cat""", """nasa badge"""]
a__ = processor(text=a_ )
a__ = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def _a ( self ):
a__ = """google/owlvit-base-patch32"""
a__ = OwlViTProcessor.from_pretrained(a_ )
a__ = [["""cat""", """nasa badge"""], ["""person"""]]
a__ = processor(text=a_ )
a__ = 16
a__ = len(a_ )
a__ = max([len(a_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def _a ( self ):
a__ = """google/owlvit-base-patch32"""
a__ = OwlViTProcessor.from_pretrained(a_ )
a__ = ["""cat""", """nasa badge"""]
a__ = processor(text=a_ )
a__ = 16
a__ = inputs["""input_ids"""]
a__ = [
[49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _a ( self ):
a__ = self.get_image_processor()
a__ = self.get_tokenizer()
a__ = OwlViTProcessor(tokenizer=a_ , image_processor=a_ )
a__ = self.prepare_image_inputs()
a__ = self.prepare_image_inputs()
a__ = processor(images=a_ , query_images=a_ )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def _a ( self ):
a__ = self.get_image_processor()
a__ = self.get_tokenizer()
a__ = OwlViTProcessor(tokenizer=a_ , image_processor=a_ )
a__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ = processor.batch_decode(a_ )
a__ = tokenizer.batch_decode(a_ )
self.assertListEqual(a_ , a_ )
| 351
| 0
|
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __A ( a_ : Optional[Any] , a_ : int , a_ : Tuple=0 )-> Any:
'''simple docstring'''
if name is None:
SCREAMING_SNAKE_CASE : Optional[int] = None
else:
SCREAMING_SNAKE_CASE : Dict = '''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(50 - spaces ) + '''s}'''
SCREAMING_SNAKE_CASE : List[Any] = fmt.format(a_ )
# Print and recurse (if needed).
if isinstance(a_ , a_ ):
if msg is not None:
print(a_ )
for k in val.keys():
recursive_print(a_ , val[k] , spaces + 2 )
elif isinstance(a_ , torch.Tensor ):
print(a_ , ''':''' , val.size() )
else:
print(a_ , ''':''' , a_ )
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : List[Any] , a_ : Dict , a_ : Tuple )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
SCREAMING_SNAKE_CASE : Dict = (num_heads, hidden_size, num_splits) + input_shape[1:]
SCREAMING_SNAKE_CASE : Tuple = param.view(*a_ )
SCREAMING_SNAKE_CASE : str = param.transpose(0 , 2 )
SCREAMING_SNAKE_CASE : List[Any] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
SCREAMING_SNAKE_CASE : List[str] = (num_heads, num_splits, hidden_size) + input_shape[1:]
SCREAMING_SNAKE_CASE : Any = param.view(*a_ )
SCREAMING_SNAKE_CASE : Tuple = param.transpose(0 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = param.view(*a_ )
return param
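# Shape sketch (illustrative numbers only): for a checkpoint_version >= 2.0 weight with
# num_heads=2, num_splits=3, hidden_size=4 and a trailing dimension of 8, the view/transpose above maps
# (2*3*4, 8) -> view (2, 3, 4, 8) -> transpose(0, 1) (3, 2, 4, 8) -> view back to (24, 8),
# i.e. the q/k/v split axis is moved in front of the head axis before flattening again.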
def __A ( a_ : Any , a_ : Union[str, Any] , a_ : Optional[int] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {}
# old versions did not store training args
SCREAMING_SNAKE_CASE : Any = input_state_dict.get('''args''' , a_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
SCREAMING_SNAKE_CASE : Dict = ds_args.padded_vocab_size
SCREAMING_SNAKE_CASE : List[str] = ds_args.max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = ds_args.hidden_size
SCREAMING_SNAKE_CASE : List[Any] = ds_args.num_layers
SCREAMING_SNAKE_CASE : Any = ds_args.num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
SCREAMING_SNAKE_CASE : Union[str, Any] = config.n_head
# The hidden_size per head.
SCREAMING_SNAKE_CASE : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
SCREAMING_SNAKE_CASE : Dict = input_state_dict['''checkpoint_version''']
else:
SCREAMING_SNAKE_CASE : List[Any] = 0.0
# The model.
SCREAMING_SNAKE_CASE : List[str] = input_state_dict['''model''']
# The language model.
SCREAMING_SNAKE_CASE : Optional[int] = model['''language_model''']
# The embeddings.
SCREAMING_SNAKE_CASE : Dict = lm['''embedding''']
# The word embeddings.
SCREAMING_SNAKE_CASE : Any = embeddings['''word_embeddings''']['''weight''']
# Truncate the embedding table to vocab_size rows.
SCREAMING_SNAKE_CASE : List[str] = word_embeddings[: config.vocab_size, :]
SCREAMING_SNAKE_CASE : Optional[int] = word_embeddings
# The position embeddings.
SCREAMING_SNAKE_CASE : Optional[Any] = embeddings['''position_embeddings''']['''weight''']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
SCREAMING_SNAKE_CASE : int = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
SCREAMING_SNAKE_CASE : List[str] = pos_embeddings
# The transformer.
SCREAMING_SNAKE_CASE : List[Any] = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder''']
# The regex to extract layer names.
SCREAMING_SNAKE_CASE : Dict = re.compile(r'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' )
# The simple map of names for "automated" rules.
SCREAMING_SNAKE_CASE : Optional[int] = {
'''attention.dense''': '''.attn.c_proj.''',
'''self_attention.dense''': '''.attn.c_proj.''',
'''mlp.dense_h_to_4h''': '''.mlp.c_fc.''',
'''mlp.dense_4h_to_h''': '''.mlp.c_proj.''',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
SCREAMING_SNAKE_CASE : Any = layer_re.match(a_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
SCREAMING_SNAKE_CASE : Dict = int(m.group(1 ) )
# The name of the operation.
SCREAMING_SNAKE_CASE : Tuple = m.group(2 )
# Is it a weight or a bias?
SCREAMING_SNAKE_CASE : Tuple = m.group(3 )
# The name of the layer.
SCREAMING_SNAKE_CASE : Tuple = F"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith('''layernorm''' ):
SCREAMING_SNAKE_CASE : Any = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2'''
SCREAMING_SNAKE_CASE : List[str] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
SCREAMING_SNAKE_CASE : Dict = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , a_ , a_ )
SCREAMING_SNAKE_CASE : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
SCREAMING_SNAKE_CASE : Dict = torch.tensor(-1E4 , dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = masked_bias
SCREAMING_SNAKE_CASE : Dict = fix_query_key_value_ordering(a_ , a_ , 3 , a_ , a_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
SCREAMING_SNAKE_CASE : Optional[Any] = out_val.transpose(0 , 1 ).contiguous()
# Store.
SCREAMING_SNAKE_CASE : Dict = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
SCREAMING_SNAKE_CASE : List[str] = fix_query_key_value_ordering(a_ , a_ , 3 , a_ , a_ )
# Store. No change of shape.
SCREAMING_SNAKE_CASE : Dict = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
SCREAMING_SNAKE_CASE : str = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE : List[Any] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
SCREAMING_SNAKE_CASE : Any = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE : List[str] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
SCREAMING_SNAKE_CASE : Optional[int] = transformer['''final_layernorm.weight''']
SCREAMING_SNAKE_CASE : List[Any] = transformer['''final_layernorm.bias''']
# For LM head, transformers' wants the matrix to weight embeddings.
SCREAMING_SNAKE_CASE : int = word_embeddings
# It should be done!
return output_state_dict
def __A ( )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' )
parser.add_argument(
'''path_to_checkpoint''' , type=a_ , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , )
parser.add_argument(
'''--config_file''' , default='''''' , type=a_ , help='''An optional config json file describing the pre-trained model.''' , )
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
# Extract the basename.
SCREAMING_SNAKE_CASE : Optional[int] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith('''.zip''' ):
with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint:
with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict:
SCREAMING_SNAKE_CASE : Optional[int] = torch.load(a_ , map_location='''cpu''' )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.load(args.path_to_checkpoint , map_location='''cpu''' )
SCREAMING_SNAKE_CASE : Tuple = input_state_dict.get('''args''' , a_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
SCREAMING_SNAKE_CASE : int = '''gelu_fast'''
elif ds_args.openai_gelu:
SCREAMING_SNAKE_CASE : int = '''gelu_new'''
else:
SCREAMING_SNAKE_CASE : Optional[Any] = '''gelu'''
else:
# in the very early days this used to be "gelu_new"
SCREAMING_SNAKE_CASE : Any = '''gelu_new'''
# Spell out all parameters in case the defaults change.
SCREAMING_SNAKE_CASE : Dict = GPTaConfig(
vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=a_ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=a_ , summary_activation=a_ , summary_proj_to_labels=a_ , summary_first_dropout=0.1 , scale_attn_weights=a_ , use_cache=a_ , bos_token_id=5_02_56 , eos_token_id=5_02_56 , )
else:
SCREAMING_SNAKE_CASE : Dict = GPTaConfig.from_json_file(args.config_file )
SCREAMING_SNAKE_CASE : Optional[int] = ['''GPT2LMHeadModel''']
# Convert.
print('''Converting''' )
SCREAMING_SNAKE_CASE : Optional[Any] = convert_megatron_checkpoint(a_ , a_ , a_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(a_ , a_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
SCREAMING_SNAKE_CASE : List[str] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
SCREAMING_SNAKE_CASE : Any = '''gpt2'''
elif tokenizer_type == "PretrainedFromHF":
SCREAMING_SNAKE_CASE : Optional[int] = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}" )
else:
SCREAMING_SNAKE_CASE : str = '''gpt2'''
SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = type(a_ ).__name__
SCREAMING_SNAKE_CASE : Tuple = tokenizer_class
# Store the config to file.
print('''Saving config''' )
config.save_pretrained(a_ )
# Save tokenizer based on args
print(F"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(a_ )
# Store the state_dict to file.
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(a_ , '''pytorch_model.bin''' )
print(F"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(a_ , a_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 698
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """nllb-moe"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :List[str] , lowerCamelCase_ :Optional[int]=12_81_12 , lowerCamelCase_ :str=10_24 , lowerCamelCase_ :Any=12 , lowerCamelCase_ :Optional[int]=40_96 , lowerCamelCase_ :int=16 , lowerCamelCase_ :List[str]=12 , lowerCamelCase_ :Optional[int]=40_96 , lowerCamelCase_ :int=16 , lowerCamelCase_ :Union[str, Any]=0.0_5 , lowerCamelCase_ :Optional[int]=0.0_5 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :Tuple="relu" , lowerCamelCase_ :str=10_24 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Any=False , lowerCamelCase_ :Optional[Any]="float32" , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :List[Any]=1_28 , lowerCamelCase_ :Any=64 , lowerCamelCase_ :Optional[int]=4 , lowerCamelCase_ :List[str]=4 , lowerCamelCase_ :Union[str, Any]=0.0_0_1 , lowerCamelCase_ :Optional[int]=0.0_0_1 , lowerCamelCase_ :List[str]="all" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :Any=False , lowerCamelCase_ :Tuple=1.0 , lowerCamelCase_ :Union[str, Any]=0.2 , lowerCamelCase_ :List[str]=1 , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :int=2 , lowerCamelCase_ :List[str]=False , **lowerCamelCase_ :int , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : str = d_model
SCREAMING_SNAKE_CASE : Optional[int] = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Any = encoder_layers
SCREAMING_SNAKE_CASE : Any = encoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : List[str] = attention_dropout
SCREAMING_SNAKE_CASE : str = activation_dropout
SCREAMING_SNAKE_CASE : Any = activation_function
SCREAMING_SNAKE_CASE : Tuple = init_std
SCREAMING_SNAKE_CASE : str = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Optional[int] = encoder_layers
SCREAMING_SNAKE_CASE : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE : int = router_z_loss_coef
SCREAMING_SNAKE_CASE : Any = router_aux_loss_coef
SCREAMING_SNAKE_CASE : str = decoder_sparse_step
SCREAMING_SNAKE_CASE : str = encoder_sparse_step
SCREAMING_SNAKE_CASE : List[str] = num_experts
SCREAMING_SNAKE_CASE : Union[str, Any] = expert_capacity
SCREAMING_SNAKE_CASE : Tuple = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = router_dtype
SCREAMING_SNAKE_CASE : Union[str, Any] = router_ignore_padding_tokens
SCREAMING_SNAKE_CASE : int = batch_prioritized_routing
SCREAMING_SNAKE_CASE : Optional[int] = second_expert_policy
SCREAMING_SNAKE_CASE : Union[str, Any] = normalize_router_prob_before_dropping
SCREAMING_SNAKE_CASE : Any = moe_eval_capacity_token_fraction
SCREAMING_SNAKE_CASE : Optional[Any] = moe_token_dropout
SCREAMING_SNAKE_CASE : Tuple = output_router_logits
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
| 698
| 1
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __UpperCamelCase :
def __init__( self : int , lowerCAmelCase : str , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : Any=10 , lowerCAmelCase : str=3 , lowerCAmelCase : Tuple=32 * 8 , lowerCAmelCase : Optional[Any]=32 * 8 , lowerCAmelCase : str=4 , lowerCAmelCase : Tuple=64 , ):
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_auxiliary_loss
UpperCAmelCase_ = num_queries
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_size
UpperCAmelCase_ = max_size
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = hidden_dim
UpperCAmelCase_ = hidden_dim
def __A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCAmelCase )
UpperCAmelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase )
UpperCAmelCase_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase ) > 0.5
).float()
UpperCAmelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase ) > 0.5).long()
UpperCAmelCase_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __A ( self : str ):
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
UpperCAmelCase_ = self.num_queries
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = [1, 1, 1, 1]
UpperCAmelCase_ = self.num_channels
UpperCAmelCase_ = 64
UpperCAmelCase_ = 128
UpperCAmelCase_ = self.hidden_dim
UpperCAmelCase_ = self.hidden_dim
UpperCAmelCase_ = self.hidden_dim
return config
def __A ( self : int ):
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def __A ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = output.encoder_hidden_states
UpperCAmelCase_ = output.pixel_decoder_hidden_states
UpperCAmelCase_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase ) , config.decoder_layers )
def __A ( self : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : Any=False ):
'''simple docstring'''
with torch.no_grad():
UpperCAmelCase_ = MaskaFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase )
UpperCAmelCase_ = model(lowerCAmelCase , output_hidden_states=lowerCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCAmelCase , lowerCAmelCase )
def __A ( self : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
def comm_check_on_output(lowerCAmelCase : Optional[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase_ = model(pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase )
UpperCAmelCase_ = model(lowerCAmelCase )
comm_check_on_output(lowerCAmelCase )
UpperCAmelCase_ = model(
pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase )
comm_check_on_output(lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __UpperCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def __A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def __A ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self : int ):
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase , **lowerCAmelCase , output_hidden_states=lowerCAmelCase )
def __A ( self : int ):
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCAmelCase )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def __A ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def __A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def __A ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def __A ( self : Optional[int] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __A ( self : int ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __A ( self : Dict ):
'''simple docstring'''
pass
def __A ( self : int ):
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
@slow
def __A ( self : Any ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCAmelCase_ = MaskaFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def __A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ = (self.model_tester.min_size,) * 2
UpperCAmelCase_ = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCAmelCase ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCAmelCase ),
"class_labels": torch.zeros(2 , 10 , device=lowerCAmelCase ).long(),
}
UpperCAmelCase_ = self.model_tester.get_config()
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation(lowerCAmelCase ).to(lowerCAmelCase )
UpperCAmelCase_ = model(**lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def __A ( self : int ):
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase , **lowerCAmelCase , output_hidden_states=lowerCAmelCase )
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase ).to(lowerCAmelCase )
UpperCAmelCase_ = model(**lowerCAmelCase , output_attentions=lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase_ = self.all_model_classes[1]
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.train()
UpperCAmelCase_ = model(lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase ).loss
loss.backward()
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.all_model_classes[1]
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(lowerCAmelCase ).to(lowerCAmelCase )
model.train()
UpperCAmelCase_ = model(lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase )
UpperCAmelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_a: Any = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __A ( self : Dict ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __A ( self : Tuple ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(lowerCAmelCase , return_tensors="pt" ).to(lowerCAmelCase )
UpperCAmelCase_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_ = model(**lowerCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
UpperCAmelCase_ = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
UpperCAmelCase_ = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
def __A ( self : str ):
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase ).eval()
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(lowerCAmelCase , return_tensors="pt" ).to(lowerCAmelCase )
UpperCAmelCase_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_ = model(**lowerCAmelCase )
# masks_queries_logits
UpperCAmelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCAmelCase_ = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
UpperCAmelCase_ = torch.tensor(lowerCAmelCase ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
# class_queries_logits
UpperCAmelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase_ = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase ).eval()
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
UpperCAmelCase_ = inputs["pixel_values"].to(lowerCAmelCase )
UpperCAmelCase_ = [el.to(lowerCAmelCase ) for el in inputs["mask_labels"]]
UpperCAmelCase_ = [el.to(lowerCAmelCase ) for el in inputs["class_labels"]]
with torch.no_grad():
UpperCAmelCase_ = model(**lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
| 715
|
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a: Dict = logging.get_logger(__name__)
class __UpperCamelCase ( lowercase ):
SCREAMING_SNAKE_CASE__ = ['input_ids', 'attention_mask']
def __init__( self : Tuple , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : Optional[int]="<pad>" , lowerCAmelCase : List[str]=125 , lowerCAmelCase : List[Any]=None , **lowerCAmelCase : Optional[Any] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ = [F"<extra_id_{i}>" for i in range(lowerCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCAmelCase_ = len(set(filter(lambda lowerCAmelCase : bool("extra_id" in str(lowerCAmelCase ) ) , lowerCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens" )
UpperCAmelCase_ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else pad_token
UpperCAmelCase_ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else eos_token
UpperCAmelCase_ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else unk_token
super().__init__(
eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , extra_ids=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = extra_ids
UpperCAmelCase_ = 2**8 # utf is 8 bits
# define special tokens dict
UpperCAmelCase_ = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
UpperCAmelCase_ = len(self.special_tokens_encoder )
UpperCAmelCase_ = len(lowerCAmelCase )
for i, token in enumerate(lowerCAmelCase ):
UpperCAmelCase_ = self.vocab_size + i - n
UpperCAmelCase_ = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def __A ( self : Optional[Any] ):
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __A ( self : List[Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCAmelCase )) + [1]
return ([0] * len(lowerCAmelCase )) + [1] + ([0] * len(lowerCAmelCase )) + [1]
def __A ( self : Any , lowerCAmelCase : List[int] ):
'''simple docstring'''
if len(lowerCAmelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __A ( self : Optional[Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ = self._add_eos_if_not_present(lowerCAmelCase )
if token_ids_a is None:
return token_ids_a
else:
UpperCAmelCase_ = self._add_eos_if_not_present(lowerCAmelCase )
return token_ids_a + token_ids_a
def __A ( self : List[str] , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase_ = [chr(lowerCAmelCase ) for i in text.encode("utf-8" )]
return tokens
def __A ( self : Optional[Any] , lowerCAmelCase : List[Any] ):
'''simple docstring'''
if token in self.special_tokens_encoder:
UpperCAmelCase_ = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
UpperCAmelCase_ = self.added_tokens_encoder[token]
elif len(lowerCAmelCase ) != 1:
UpperCAmelCase_ = self.unk_token_id
else:
UpperCAmelCase_ = ord(lowerCAmelCase ) + self._num_special_tokens
return token_id
def __A ( self : str , lowerCAmelCase : Any ):
'''simple docstring'''
if index in self.special_tokens_decoder:
UpperCAmelCase_ = self.special_tokens_decoder[index]
else:
UpperCAmelCase_ = chr(index - self._num_special_tokens )
return token
def __A ( self : Union[str, Any] , lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = b""
for token in tokens:
if token in self.special_tokens_decoder:
UpperCAmelCase_ = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.added_tokens_decoder:
                UpperCAmelCase_ = self.added_tokens_decoder[token].encode("utf-8" )
elif token in self.special_tokens_encoder:
UpperCAmelCase_ = token.encode("utf-8" )
elif token in self.added_tokens_encoder:
UpperCAmelCase_ = token.encode("utf-8" )
else:
UpperCAmelCase_ = bytes([ord(lowerCAmelCase )] )
bstring += tok_string
UpperCAmelCase_ = bstring.decode("utf-8" , errors="ignore" )
return string
def __A ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ):
'''simple docstring'''
return ()
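# A minimal, self-contained sketch of the byte-level round trip the tokenizer methods
# above implement (ByT5-style): every UTF-8 byte of the input maps to one single-character
# token via chr()/ord(). The helper names below are illustrative, not part of the class.
def _bytes_to_tokens(text):
    return [chr(b) for b in text.encode("utf-8")]

def _tokens_to_text(tokens):
    return bytes(ord(t) for t in tokens).decode("utf-8", errors="ignore")

assert _tokens_to_text(_bytes_to_tokens("héllo")) == "héllo"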
| 268
| 0
|
from __future__ import annotations
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ ) -> int:
# Checks if the entire collection has been sorted
if len(lowerCamelCase_ ) <= 1 or n <= 1:
return
insert_next(lowerCamelCase_ , n - 1 )
rec_insertion_sort(lowerCamelCase_ , n - 1 )
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
# Checks order between adjacent elements
if index >= len(lowerCamelCase_ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
insert_next(lowerCamelCase_ , index + 1 )
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = input("Enter integers separated by spaces: ")
__lowerCamelCase : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
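# Because the names above are collapsed by the obfuscation, here is a compact, runnable
# sketch of the same idea: recursive insertion sort with an in-place adjacent swap.
def insertion_sort_sketch(items, n=None):
    n = len(items) if n is None else n
    if n <= 1:
        return
    insertion_sort_sketch(items, n - 1)  # sort the first n - 1 items
    i = n - 1
    while i > 0 and items[i - 1] > items[i]:  # bubble items[n - 1] down into place
        items[i - 1], items[i] = items[i], items[i - 1]
        i -= 1

_sample = [5, 2, 4, 1, 3]
insertion_sort_sketch(_sample)
assert _sample == [1, 2, 3, 4, 5]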
| 323
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
__lowerCamelCase : str = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
__lowerCamelCase : Optional[Any] = dataset.iloc[:, 1:2].values
__lowerCamelCase : Any = dataset.iloc[:, 2].values
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Tuple = train_test_split(X, y, test_size=0.2, random_state=0)
__lowerCamelCase : List[Any] = PolynomialFeatures(degree=4)
__lowerCamelCase : Optional[Any] = poly_reg.fit_transform(X)
__lowerCamelCase : Dict = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial() -> None:
    plt.scatter(X, y, color="red" )
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X) ) , color="blue" )
    plt.title("Truth or Bluff (Polynomial Regression)" )
plt.xlabel("Position level" )
plt.ylabel("Salary" )
plt.show()
if __name__ == "__main__":
    viz_polynomial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
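# A self-contained sketch of the same workflow (degree-4 polynomial regression) on
# synthetic data, so it runs without the remote CSV above; the numbers are illustrative.
import numpy as np
demo_X = np.arange(1, 11).reshape(-1, 1).astype(float)
demo_y = 0.5 * demo_X.ravel() ** 3 + np.random.default_rng(0).normal(scale=5.0, size=10)
demo_poly = PolynomialFeatures(degree=4)
demo_model = LinearRegression().fit(demo_poly.fit_transform(demo_X), demo_y)
print(demo_model.predict(demo_poly.transform([[5.5]])))  # prediction at level 5.5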
| 323
| 1
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
lowerCamelCase__ : Any = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
lowerCamelCase__ : int = [0, 2_5, 5_0]
lowerCamelCase__ : List[str] = [2_5, 5_0, 7_5]
lowerCamelCase__ : List[Any] = fuzz.membership.trimf(X, abca)
lowerCamelCase__ : List[str] = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
lowerCamelCase__ : List[Any] = np.ones(7_5)
lowerCamelCase__ : List[Any] = np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
lowerCamelCase__ : List[str] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
lowerCamelCase__ : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
lowerCamelCase__ : Optional[Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
lowerCamelCase__ : Any = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
lowerCamelCase__ : Dict = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
lowerCamelCase__ : int = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
lowerCamelCase__ : Tuple = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
lowerCamelCase__ : Any = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
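# The scikit-fuzzy calls above reduce to element-wise max/min on NumPy arrays; a tiny
# sketch of union, intersection and complement without the library, for reference.
mu_a = np.array([0.0, 0.3, 0.7, 1.0])
mu_b = np.array([1.0, 0.6, 0.2, 0.0])
print(np.maximum(mu_a, mu_b))   # union: max(µA(x), µB(x))
print(np.minimum(mu_a, mu_b))   # intersection: min(µA(x), µB(x))
print(1.0 - mu_a)               # complement: 1 - µA(x)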
| 495
|
lowerCamelCase__ : str = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : Any = [False] * len(lowercase_ )
lowercase__ : List[Any] = [s]
lowercase__ : int = True
while queue:
lowercase__ : Dict = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowercase_ )
lowercase__ : Optional[Any] = True
lowercase__ : int = u
return visited[t]
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
lowercase__ : Optional[int] = [-1] * (len(lowercase_ ))
lowercase__ : Optional[Any] = 0
lowercase__ : List[Any] = []
lowercase__ : Optional[Any] = [i[:] for i in graph] # Record original cut, copy.
while bfs(lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
lowercase__ : Optional[int] = float("""Inf""" )
lowercase__ : Tuple = sink
while s != source:
# Find the minimum value in select path
lowercase__ : List[str] = min(lowercase_ , graph[parent[s]][s] )
lowercase__ : List[str] = parent[s]
max_flow += path_flow
lowercase__ : str = sink
while v != source:
lowercase__ : int = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase__ : Union[str, Any] = parent[v]
for i in range(len(lowercase_ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
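# The variable names above are collapsed by the obfuscation, so here is a compact,
# runnable sketch of the same BFS augmenting-path (Ford-Fulkerson / Edmonds-Karp) idea;
# it returns the saturated edges of the original capacity matrix once no augmenting
# path remains. The demo matrix mirrors the one defined at the top of this snippet.
from collections import deque

def mincut_sketch(capacity, source, sink):
    n = len(capacity)
    residual = [row[:] for row in capacity]

    def bfs_parents():
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and residual[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        return parent if parent[sink] != -1 else None

    while (parent := bfs_parents()) is not None:
        # bottleneck capacity along the augmenting path
        flow, v = float("inf"), sink
        while v != source:
            flow = min(flow, residual[parent[v]][v])
            v = parent[v]
        # push the flow and update the residual graph
        v = sink
        while v != source:
            u = parent[v]
            residual[u][v] -= flow
            residual[v][u] += flow
            v = u
    return [(i, j) for i in range(n) for j in range(n)
            if capacity[i][j] > 0 and residual[i][j] == 0]

demo_capacity = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
print(mincut_sketch(demo_capacity, source=0, sink=5))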
| 495
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 677
|
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
class __magic_name__ :
def __init__( self : Tuple , snake_case__ : str = None , snake_case__ : uuid.UUID = None , snake_case__ : Optional[int]=None , snake_case__ : Tuple=None ):
'''simple docstring'''
if not conversation_id:
            lowercase :List[Any] = uuid.uuid4()
if past_user_inputs is None:
lowercase :Union[str, Any] = []
if generated_responses is None:
lowercase :List[str] = []
lowercase :uuid.UUID = conversation_id
lowercase :List[str] = past_user_inputs
lowercase :List[str] = generated_responses
lowercase :Optional[str] = text
def __eq__( self : Optional[Any] , snake_case__ : str ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __snake_case ( self : Optional[int] , snake_case__ : str , snake_case__ : bool = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
f"""with: \"{text}\".""" )
lowercase :List[str] = text
else:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
lowercase :Optional[int] = text
def __snake_case ( self : Any ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowercase :Tuple = None
def __snake_case ( self : Tuple , snake_case__ : str ):
'''simple docstring'''
self.generated_responses.append(snake_case__ )
def __snake_case ( self : Tuple ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Dict ):
'''simple docstring'''
lowercase :int = f"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
lowercase :Dict = '''user''' if is_user else '''bot'''
output += f"""{name} >> {text} \n"""
return output
@add_end_docstrings(
__UpperCAmelCase , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class __magic_name__ ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , *snake_case__ : Optional[Any] , **snake_case__ : List[Any] ):
'''simple docstring'''
super().__init__(*snake_case__ , **snake_case__ )
if self.tokenizer.pad_token_id is None:
lowercase :Any = self.tokenizer.eos_token
def __snake_case ( self : List[Any] , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=None , **snake_case__ : Union[str, Any] ):
'''simple docstring'''
lowercase :str = {}
lowercase :List[str] = {}
lowercase :Tuple = {}
if min_length_for_response is not None:
lowercase :Dict = min_length_for_response
if minimum_tokens is not None:
lowercase :Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
lowercase :List[Any] = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowercase :Dict = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(snake_case__ )
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[Any] , snake_case__ : Union[Conversation, List[Conversation]] , snake_case__ : int=0 , **snake_case__ : int ):
'''simple docstring'''
lowercase :int = super().__call__(snake_case__ , num_workers=snake_case__ , **snake_case__ )
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) == 1:
return outputs[0]
return outputs
def __snake_case ( self : List[Any] , snake_case__ : Conversation , snake_case__ : Any=3_2 ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' )
if conversation.new_user_input is None:
raise ValueError(
f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
'''Add user inputs with the conversation\'s `add_user_input` method''' )
if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
lowercase :List[str] = self.tokenizer._build_conversation_input_ids(snake_case__ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowercase :List[str] = self._legacy_parse_and_tokenize(snake_case__ )
if self.framework == "pt":
lowercase :int = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowercase :Any = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def __snake_case ( self : int , snake_case__ : Union[str, Any] , snake_case__ : Any=1_0 , **snake_case__ : int ):
'''simple docstring'''
lowercase :Dict = generate_kwargs.get('''max_length''' , self.model.config.max_length )
lowercase :Optional[Any] = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
lowercase :int = max_length - minimum_tokens
lowercase :int = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
lowercase :int = model_inputs['''attention_mask'''][:, -trim:]
lowercase :int = model_inputs.pop('''conversation''' )
lowercase :Union[str, Any] = max_length
lowercase :Dict = self.model.generate(**snake_case__ , **snake_case__ )
if self.model.config.is_encoder_decoder:
lowercase :List[Any] = 1
else:
lowercase :Optional[Any] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __snake_case ( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : Optional[Any]=True ):
'''simple docstring'''
lowercase :Dict = model_outputs['''output_ids''']
lowercase :Dict = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ , )
lowercase :Optional[int] = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(snake_case__ )
return conversation
def __snake_case ( self : List[Any] , snake_case__ : Conversation ):
'''simple docstring'''
lowercase :str = self.tokenizer.eos_token_id
lowercase :List[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) )
if len(snake_case__ ) > self.tokenizer.model_max_length:
lowercase :List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
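# A hedged usage sketch of the pipeline class defined above, assuming the public
# `transformers` conversational API (Conversation + pipeline("conversational"), shipped in
# releases that still include the conversational task) and a DialoGPT-style checkpoint;
# the checkpoint name is illustrative, not taken from this file.
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Hi, can you recommend a good science fiction book?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])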
| 677
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 701
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
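# A quick cross-check sketch of the same task (digit sum of 100!) using only the standard
# library; 648 is the expected result for n = 100.
import math
print(sum(int(digit) for digit in str(math.factorial(100))))  # 648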
| 696
| 0
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',)
SCREAMING_SNAKE_CASE = torch.permute(_UpperCAmelCase , (0, 2, 1))
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCAmelCase):
# linear layer
SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',)
SCREAMING_SNAKE_CASE = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
if "metadata" in layer:
SCREAMING_SNAKE_CASE = layer.split('metadata')
SCREAMING_SNAKE_CASE = ''.join(split_layer[0])[:-1]
SCREAMING_SNAKE_CASE = [tuple(('metadata' + split_layer[1]).split('/'))]
elif "kvstore" in layer:
SCREAMING_SNAKE_CASE = layer.split('kvstore')
SCREAMING_SNAKE_CASE = ''.join(split_layer[0])[:-1]
SCREAMING_SNAKE_CASE = [tuple(('kvstore' + split_layer[1]).split('/'))]
else:
SCREAMING_SNAKE_CASE = layer.split('/')
SCREAMING_SNAKE_CASE = '/'.join(split_layer[:-1])
SCREAMING_SNAKE_CASE = (split_layer[-1],)
if "kvstore/path" in layer:
SCREAMING_SNAKE_CASE = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
SCREAMING_SNAKE_CASE = 'file'
else:
SCREAMING_SNAKE_CASE = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = rename_keys(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = {}
for k, v in current_block.items():
SCREAMING_SNAKE_CASE = v
SCREAMING_SNAKE_CASE = new_current_block
torch.save(_UpperCAmelCase , _UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = WEIGHTS_NAME):
SCREAMING_SNAKE_CASE = convert_file_size_to_int(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase)
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb') as fp:
SCREAMING_SNAKE_CASE = serialization.msgpack_restore(fp.read())['optimizer']['target']
SCREAMING_SNAKE_CASE = flatten_dict(_UpperCAmelCase , sep='/')
SCREAMING_SNAKE_CASE = {}
for layer in checkpoint_info.keys():
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_key_and_tensorstore_dict(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
if curr_real_layer_name in all_layers:
SCREAMING_SNAKE_CASE = content
else:
SCREAMING_SNAKE_CASE = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
SCREAMING_SNAKE_CASE = ts.open(unflatten_dict(all_layers[key])).result().read().result()
SCREAMING_SNAKE_CASE = torch.tensor(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
# use the renaming pattern from the small conversion scripts
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = rename_base_flax_keys(tuple(key.split('/')) , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = '/'.join(_UpperCAmelCase)
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
SCREAMING_SNAKE_CASE = os.path.join(
_UpperCAmelCase , weights_name.replace('.bin' , F'''-{len(_UpperCAmelCase)+1:05d}-of-???.bin'''))
rename_and_save_block(_UpperCAmelCase , _UpperCAmelCase)
sharded_state_dicts.append(current_block.keys())
del current_block
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = raw_weights.to(getattr(_UpperCAmelCase , _UpperCAmelCase))
current_block_size += weight_size
total_size += weight_size
# Add the last block
SCREAMING_SNAKE_CASE = os.path.join(_UpperCAmelCase , weights_name.replace('.bin' , F'''-{len(_UpperCAmelCase)+1:05d}-of-???.bin'''))
rename_and_save_block(_UpperCAmelCase , _UpperCAmelCase)
sharded_state_dicts.append(current_block.keys())
# If we only have one shard, we return it
if len(_UpperCAmelCase) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = {}
for idx, shard in enumerate(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = weights_name.replace(
'.bin' , F'''-{idx+1:05d}-of-{len(_UpperCAmelCase):05d}.bin''') # len(sharded_state_dicts):05d}
SCREAMING_SNAKE_CASE = os.path.join(_UpperCAmelCase , weights_name.replace('.bin' , F'''-{idx+1:05d}-of-???.bin'''))
os.rename(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase))
SCREAMING_SNAKE_CASE = shard
for key in shard:
SCREAMING_SNAKE_CASE = shard_file
# Add the metadata
SCREAMING_SNAKE_CASE = {'total_size': total_size}
SCREAMING_SNAKE_CASE = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase) , 'w' , encoding='utf-8') as f:
SCREAMING_SNAKE_CASE = json.dumps(_UpperCAmelCase , indent=2 , sort_keys=_UpperCAmelCase) + '\n'
f.write(_UpperCAmelCase)
return metadata, index
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
a_ : Any = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def lowerCamelCase__ ():
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
SCREAMING_SNAKE_CASE = SwitchTransformersConfig.from_pretrained('google/switch-base-8')
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted')
SCREAMING_SNAKE_CASE = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto')
SCREAMING_SNAKE_CASE = TaTokenizer.from_pretrained('t5-small')
SCREAMING_SNAKE_CASE = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
SCREAMING_SNAKE_CASE = tokenizer(_UpperCAmelCase , return_tensors='pt').input_ids
SCREAMING_SNAKE_CASE = model.generate(_UpperCAmelCase , decoder_start_token_id=0)
print(tokenizer.decode(out[0]))
| 73
|
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class _snake_case ( UpperCAmelCase_ ):
def __init__( self):
'''simple docstring'''
lowercase__ : List[Any] = []
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_init_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_train_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_train_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_epoch_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_epoch_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_step_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_step_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_evaluate""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_predict""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_save""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_log""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_prediction_step""")
@require_torch
class _snake_case ( unittest.TestCase ):
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = tempfile.mkdtemp()
def lowercase__ ( self):
'''simple docstring'''
shutil.rmtree(self.output_dir)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Any = RegressionDataset(length=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = RegressionDataset(length=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = RegressionModelConfig(a=SCREAMING_SNAKE_CASE_ , b=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE_ , report_to=[] , **SCREAMING_SNAKE_CASE_)
return Trainer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , callbacks=SCREAMING_SNAKE_CASE_ , )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_))
# Order doesn't matter
lowercase__ : str = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__)
lowercase__ : Tuple = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__)
for cba, cba in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assertEqual(SCREAMING_SNAKE_CASE_ , cba.__class__)
elif not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE_)
else:
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : int = ["""on_init_end""", """on_train_begin"""]
lowercase__ : Union[str, Any] = 0
lowercase__ : Union[str, Any] = len(trainer.get_eval_dataloader())
lowercase__ : Dict = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader()) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs):
expected_events.append("""on_epoch_begin""")
for _ in range(SCREAMING_SNAKE_CASE_):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""")
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""")
expected_events.append("""on_epoch_end""")
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : int = self.get_trainer()
lowercase__ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
# Callbacks passed at init are added to the default callbacks
lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback])
expected_callbacks.append(SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : Any = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : Tuple = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.remove(SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_)
self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
trainer.add_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
# We can also add, pop, or remove by instance
lowercase__ : Union[str, Any] = self.get_trainer()
lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.remove(SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
lowercase__ : str = self.get_trainer()
lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
trainer.add_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback])
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
# Independent log/save/eval
lowercase__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5)
trainer.train()
lowercase__ : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5)
trainer.train()
lowercase__ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""")
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
lowercase__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""")
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""") as warn_mock:
lowercase__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(SCREAMING_SNAKE_CASE_) in warn_mock.call_args[0][0]
| 12
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 83
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :str , snake_case__ :List[str]=False , snake_case__ :Dict=False , snake_case__ :Any=True , snake_case__ :List[str]=False , snake_case__ :Optional[Any]="dummy_doc") -> List[Any]:
_A = {doc: key_lines}
_A = {doc: sys_lines}
_A = {}
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A , _A = reader.get_doc_mentions(snake_case__ , key_doc_lines[doc] , snake_case__)
key_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
_A , _A = reader.get_doc_mentions(snake_case__ , sys_doc_lines[doc] , snake_case__)
sys_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
if remove_nested:
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''')
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''')
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""")
return doc_coref_infos
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Dict , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Tuple) -> int:
_A = get_coref_infos(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
_A = {}
_A = 0
_A = 0
for name, metric in metrics:
_A , _A , _A = evaluator.evaluate_documents(snake_case__ , snake_case__ , beta=1)
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa})
logger.info(
name.ljust(10) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_A = (conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''')
output_scores.update({"""conll_score""": conll})
return output_scores
def snake_case ( snake_case__ :Union[str, Any]) -> List[Any]:
_A = False
for line in key_lines:
if not line.startswith("""#"""):
if len(line.split()) > 6:
_A = line.split()[5]
if not parse_col == "-":
_A = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Union[str, Any]:
_A = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_A = util.check_gold_parse_annotation(lowerCAmelCase_ )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_A = evaluate(
key_lines=lowerCAmelCase_ , sys_lines=lowerCAmelCase_ , metrics=lowerCAmelCase_ , NP_only=lowerCAmelCase_ , remove_nested=lowerCAmelCase_ , keep_singletons=lowerCAmelCase_ , min_span=lowerCAmelCase_ , )
return score
| 83
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class A__ :
def __init__( self : Optional[int] , _a : Collection[float] | None = None ) -> None:
'''simple docstring'''
if components is None:
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =list(_a )
def __len__( self : Optional[int] ) -> int:
'''simple docstring'''
return len(self.__components )
def __str__( self : List[Any] ) -> str:
'''simple docstring'''
        return "(" + ",".join(map(str , self.__components ) ) + ")"
def __add__( self : Optional[int] , _a : Vector ) -> Vector:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =len(self )
if size == len(_a ):
_SCREAMING_SNAKE_CASE =[self.__components[i] + other.component(_a ) for i in range(_a )]
return Vector(_a )
else:
raise Exception('must have the same size' )
def __sub__( self : List[str] , _a : Vector ) -> Vector:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =len(self )
if size == len(_a ):
_SCREAMING_SNAKE_CASE =[self.__components[i] - other.component(_a ) for i in range(_a )]
return Vector(_a )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : Optional[Any] , _a : float ) -> Vector:
'''simple docstring'''
...
@overload
def __mul__( self : Union[str, Any] , _a : Vector ) -> float:
'''simple docstring'''
...
def __mul__( self : List[Any] , _a : float | Vector ) -> float | Vector:
'''simple docstring'''
if isinstance(_a , (float, int) ):
_SCREAMING_SNAKE_CASE =[c * other for c in self.__components]
return Vector(_a )
elif isinstance(_a , _a ) and len(self ) == len(_a ):
_SCREAMING_SNAKE_CASE =len(self )
_SCREAMING_SNAKE_CASE =[self.__components[i] * other.component(_a ) for i in range(_a )]
return sum(_a )
else: # error case
raise Exception('invalid operand!' )
def A ( self : Optional[int] ) -> Vector:
'''simple docstring'''
return Vector(self.__components )
def A ( self : List[Any] , _a : int ) -> float:
'''simple docstring'''
if isinstance(_a , _a ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def A ( self : List[str] , _a : int , _a : float ) -> None:
'''simple docstring'''
assert -len(self.__components ) <= pos < len(self.__components )
_SCREAMING_SNAKE_CASE =value
def A ( self : Tuple ) -> float:
'''simple docstring'''
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
_SCREAMING_SNAKE_CASE =[c**2 for c in self.__components]
return math.sqrt(sum(_a ) )
def A ( self : Union[str, Any] , _a : Vector , _a : bool = False ) -> float:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self * other
_SCREAMING_SNAKE_CASE =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _lowerCAmelCase ( _UpperCamelCase : int ) -> Vector:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase )
return Vector([0] * dimension )
def _lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> Vector:
"""simple docstring"""
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (isinstance(_UpperCamelCase , _UpperCamelCase ))
_SCREAMING_SNAKE_CASE =[0] * dimension
_SCREAMING_SNAKE_CASE =1
return Vector(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : float , _UpperCamelCase : Vector , _UpperCamelCase : Vector ) -> Vector:
"""simple docstring"""
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and isinstance(_UpperCamelCase , _UpperCamelCase )
and (isinstance(_UpperCamelCase , (int, float) ))
)
return x * scalar + y
def _lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> Vector:
"""simple docstring"""
random.seed(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =[random.randint(_UpperCamelCase , _UpperCamelCase ) for _ in range(_UpperCamelCase )]
return Vector(_UpperCamelCase )
class A__ :
def __init__( self : int , _a : list[list[float]] , _a : int , _a : int ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =matrix
_SCREAMING_SNAKE_CASE =w
_SCREAMING_SNAKE_CASE =h
def __str__( self : Tuple ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Optional[Any] , _a : Matrix ) -> Matrix:
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
_SCREAMING_SNAKE_CASE =[]
for i in range(self.__height ):
_SCREAMING_SNAKE_CASE =[
self.__matrix[i][j] + other.component(_a , _a )
for j in range(self.__width )
]
matrix.append(_a )
return Matrix(_a , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self : str , _a : Matrix ) -> Matrix:
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
_SCREAMING_SNAKE_CASE =[]
for i in range(self.__height ):
_SCREAMING_SNAKE_CASE =[
self.__matrix[i][j] - other.component(_a , _a )
for j in range(self.__width )
]
matrix.append(_a )
return Matrix(_a , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : Any , _a : float ) -> Matrix:
'''simple docstring'''
...
@overload
def __mul__( self : Tuple , _a : Vector ) -> Vector:
'''simple docstring'''
...
def __mul__( self : int , _a : float | Vector ) -> Vector | Matrix:
'''simple docstring'''
if isinstance(_a , _a ): # matrix-vector
if len(_a ) == self.__width:
_SCREAMING_SNAKE_CASE =zero_vector(self.__height )
for i in range(self.__height ):
_SCREAMING_SNAKE_CASE =[
self.__matrix[i][j] * other.component(_a )
for j in range(self.__width )
]
ans.change_component(_a , sum(_a ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(_a , (int, float) ): # matrix-scalar
_SCREAMING_SNAKE_CASE =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(_a , self.__width , self.__height )
return None
def A ( self : Optional[int] ) -> int:
'''simple docstring'''
return self.__height
def A ( self : int ) -> int:
'''simple docstring'''
return self.__width
def A ( self : List[str] , _a : int , _a : int ) -> float:
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def A ( self : Optional[int] , _a : int , _a : int , _a : float ) -> None:
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
_SCREAMING_SNAKE_CASE =value
else:
raise Exception('change_component: indices out of bounds' )
def A ( self : str , _a : int , _a : int ) -> float:
'''simple docstring'''
if self.__height != self.__width:
raise Exception('Matrix is not square' )
_SCREAMING_SNAKE_CASE =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(_a ) ):
_SCREAMING_SNAKE_CASE =minor[i][:y] + minor[i][y + 1 :]
return Matrix(_a , self.__width - 1 , self.__height - 1 ).determinant()
def A ( self : Union[str, Any] , _a : int , _a : int ) -> float:
'''simple docstring'''
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(_a , _a )
else:
raise Exception('Indices out of bounds' )
def A ( self : Optional[Any] ) -> float:
'''simple docstring'''
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
_SCREAMING_SNAKE_CASE =[
self.__matrix[0][y] * self.cofactor(0 , _a ) for y in range(self.__width )
]
return sum(_a )
def _lowerCAmelCase ( _UpperCamelCase : int ) -> Matrix:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[[0] * n for _ in range(_UpperCamelCase )]
return Matrix(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> Matrix:
"""simple docstring"""
random.seed(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =[
[random.randint(_UpperCamelCase , _UpperCamelCase ) for _ in range(_UpperCamelCase )] for _ in range(_UpperCamelCase )
]
return Matrix(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
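# Illustrative, self-contained sketch of the same Laplace/cofactor expansion the
# determinant method above performs, written for plain nested lists so the
# recursion is easy to follow. All names below are hypothetical additions.
def determinant_sketch(matrix: list[list[float]]) -> float:
    n = len(matrix)
    if n == 1:
        return matrix[0][0]
    total = 0.0
    for col in range(n):
        # minor: drop row 0 and the current column, then recurse
        minor = [row[:col] + row[col + 1 :] for row in matrix[1:]]
        total += (-1) ** col * matrix[0][col] * determinant_sketch(minor)
    return total
# determinant_sketch([[1, 2], [3, 4]]) == -2.0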
| 405
|
'''simple docstring'''
def _lowerCAmelCase ( _UpperCamelCase : int ) -> int:
"""simple docstring"""
    if _UpperCamelCase < 0:
        raise ValueError('Input value must be a positive integer' )
    elif not isinstance(_UpperCamelCase , int ):
        raise TypeError('Input value must be a \'int\' type' )
return bin(_UpperCamelCase ).count('1' )
if __name__ == "__main__":
import doctest
doctest.testmod()
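# Illustrative aside: an equivalent way to count set bits without converting to
# a string (Brian Kernighan's trick). The helper name below is hypothetical.
def count_set_bits_kernighan(number: int) -> int:
    count = 0
    while number:
        number &= number - 1  # clears the lowest set bit each iteration
        count += 1
    return count
# count_set_bits_kernighan(0b101101) == 4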
| 405
| 1
|
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
__UpperCamelCase : Optional[Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : tuple , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int]=False , ):
"""simple docstring"""
output_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE , output_names=SCREAMING_SNAKE_CASE , dynamic_axes=SCREAMING_SNAKE_CASE , do_constant_folding=SCREAMING_SNAKE_CASE , use_external_data_format=SCREAMING_SNAKE_CASE , enable_onnx_checker=SCREAMING_SNAKE_CASE , opset_version=SCREAMING_SNAKE_CASE , )
else:
export(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE , output_names=SCREAMING_SNAKE_CASE , dynamic_axes=SCREAMING_SNAKE_CASE , do_constant_folding=SCREAMING_SNAKE_CASE , opset_version=SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : bool = False ):
"""simple docstring"""
UpperCamelCase__ : Dict = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
UpperCamelCase__ : str = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
UpperCamelCase__ : int = '''cpu'''
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE , torch_dtype=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = Path(SCREAMING_SNAKE_CASE )
# TEXT ENCODER
UpperCamelCase__ : List[str] = pipeline.text_encoder.config.max_position_embeddings
UpperCamelCase__ : Any = pipeline.text_encoder.config.hidden_size
UpperCamelCase__ : Tuple = pipeline.tokenizer(
'''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=SCREAMING_SNAKE_CASE , dtype=torch.intaa )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''sequence'''},
} , opset=SCREAMING_SNAKE_CASE , )
del pipeline.text_encoder
# UNET
UpperCamelCase__ : Any = pipeline.unet.config.in_channels
UpperCamelCase__ : Tuple = pipeline.unet.config.sample_size
UpperCamelCase__ : Union[str, Any] = output_path / '''unet''' / '''model.onnx'''
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
torch.randn(2 ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
torch.randn(2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
False,
) , output_path=SCREAMING_SNAKE_CASE , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''timestep''': {0: '''batch'''},
'''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
} , opset=SCREAMING_SNAKE_CASE , use_external_data_format=SCREAMING_SNAKE_CASE , )
UpperCamelCase__ : Any = str(unet_path.absolute().as_posix() )
UpperCamelCase__ : Any = os.path.dirname(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = onnx.load(SCREAMING_SNAKE_CASE )
# clean up existing tensor files
shutil.rmtree(SCREAMING_SNAKE_CASE )
os.mkdir(SCREAMING_SNAKE_CASE )
# collate external tensor files into one
onnx.save_model(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , save_as_external_data=SCREAMING_SNAKE_CASE , all_tensors_to_one_file=SCREAMING_SNAKE_CASE , location='''weights.pb''' , convert_attribute=SCREAMING_SNAKE_CASE , )
del pipeline.unet
# VAE ENCODER
UpperCamelCase__ : Dict = pipeline.vae
UpperCamelCase__ : Optional[int] = vae_encoder.config.in_channels
UpperCamelCase__ : Union[str, Any] = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
UpperCamelCase__ : List[str] = lambda SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : vae_encoder.encode(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0].sample()
onnx_export(
SCREAMING_SNAKE_CASE , model_args=(
torch.randn(1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
False,
) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=SCREAMING_SNAKE_CASE , )
# VAE DECODER
UpperCamelCase__ : List[Any] = pipeline.vae
UpperCamelCase__ : Tuple = vae_decoder.config.latent_channels
UpperCamelCase__ : List[str] = vae_decoder.config.out_channels
# forward only through the decoder part
UpperCamelCase__ : Optional[Any] = vae_encoder.decode
onnx_export(
SCREAMING_SNAKE_CASE , model_args=(
torch.randn(1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=SCREAMING_SNAKE_CASE , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
UpperCamelCase__ : int = pipeline.safety_checker
UpperCamelCase__ : str = safety_checker.config.vision_config.num_channels
UpperCamelCase__ : int = safety_checker.config.vision_config.image_size
UpperCamelCase__ : Optional[Any] = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
torch.randn(1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
'''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
} , opset=SCREAMING_SNAKE_CASE , )
del pipeline.safety_checker
UpperCamelCase__ : List[Any] = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' )
UpperCamelCase__ : List[str] = pipeline.feature_extractor
else:
UpperCamelCase__ : Union[str, Any] = None
UpperCamelCase__ : int = None
UpperCamelCase__ : List[str] = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(SCREAMING_SNAKE_CASE )
print('''ONNX pipeline saved to''' , SCREAMING_SNAKE_CASE )
del pipeline
del onnx_pipeline
UpperCamelCase__ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE , provider='''CPUExecutionProvider''' )
print('''ONNX pipeline is loadable''' )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
__UpperCamelCase : List[str] = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
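# Illustrative aside: the bare torch.onnx.export call that the onnx_export helper
# above wraps, shown on a toy linear layer. The file name, tensor names and opset
# below are assumptions for this example, not values used by the script itself.
def _export_toy_linear_to_onnx(path: str = "toy_linear.onnx") -> None:
    model = torch.nn.Linear(4, 2).eval()
    dummy = torch.randn(1, 4)
    export(
        model,
        (dummy,),
        f=path,
        input_names=["input"],
        output_names=["output"],
        dynamic_axes={"input": {0: "batch"}},  # allow a variable batch size
        do_constant_folding=True,
        opset_version=14,
    )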
| 717
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Any = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class __magic_name__ ( __lowerCAmelCase):
A: List[Any] = "roberta-prelayernorm"
def __init__( self : Tuple , lowerCamelCase__ : List[Any]=50265 , lowerCamelCase__ : Optional[Any]=768 , lowerCamelCase__ : str=12 , lowerCamelCase__ : Union[str, Any]=12 , lowerCamelCase__ : Dict=3072 , lowerCamelCase__ : int="gelu" , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : List[str]=512 , lowerCamelCase__ : int=2 , lowerCamelCase__ : Tuple=0.02 , lowerCamelCase__ : List[Any]=1E-1_2 , lowerCamelCase__ : str=1 , lowerCamelCase__ : int=0 , lowerCamelCase__ : int=2 , lowerCamelCase__ : Union[str, Any]="absolute" , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Dict=None , **lowerCamelCase__ : Any , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = vocab_size
UpperCamelCase__ : Union[str, Any] = hidden_size
UpperCamelCase__ : List[str] = num_hidden_layers
UpperCamelCase__ : Optional[int] = num_attention_heads
UpperCamelCase__ : List[str] = hidden_act
UpperCamelCase__ : Optional[int] = intermediate_size
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : List[str] = attention_probs_dropout_prob
UpperCamelCase__ : Optional[int] = max_position_embeddings
UpperCamelCase__ : Optional[Any] = type_vocab_size
UpperCamelCase__ : Union[str, Any] = initializer_range
UpperCamelCase__ : Dict = layer_norm_eps
UpperCamelCase__ : Union[str, Any] = position_embedding_type
UpperCamelCase__ : Optional[int] = use_cache
UpperCamelCase__ : int = classifier_dropout
class __magic_name__ ( __lowerCAmelCase):
@property
def UpperCAmelCase__ ( self : int ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 106
| 0
|
import functools
from typing import Any
def __lowerCAmelCase ( _A ,_A ):
"""simple docstring"""
if not isinstance(_A ,_A ) or len(_A ) == 0:
raise ValueError("""the string should be not empty string""" )
if not isinstance(_A ,_A ) or not all(
isinstance(_A ,_A ) and len(_A ) > 0 for item in words ):
raise ValueError("""the words should be a list of non-empty strings""" )
# Build trie
_lowercase = {}
_lowercase = """WORD_KEEPER"""
for word in words:
_lowercase = trie
for c in word:
if c not in trie_node:
_lowercase = {}
_lowercase = trie_node[c]
_lowercase = True
_lowercase = len(_A )
# Dynamic programming method
@functools.cache
def is_breakable(_A ) -> bool:
if index == len_string:
return True
_lowercase = trie
for i in range(_A ,_A ):
_lowercase = trie_node.get(string[i] ,_A )
if trie_node is None:
return False
if trie_node.get(_A ,_A ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
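# Illustrative, self-contained restatement of the same word-break check using a
# plain set plus functools.cache instead of a trie. All names are hypothetical.
def word_break_sketch(string: str, words: list[str]) -> bool:
    word_set = set(words)
    max_len = max((len(w) for w in word_set), default=0)

    @functools.cache
    def breakable(index: int) -> bool:
        if index == len(string):
            return True
        return any(
            string[index:end] in word_set and breakable(end)
            for end in range(index + 1, min(len(string), index + max_len) + 1)
        )

    return breakable(0)
# word_break_sketch("applepenapple", ["apple", "pen"]) is True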
| 398
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
A_: Tuple = logging.get_logger(__name__)
A_: Optional[Any] = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = 'gpt_neo'
lowerCAmelCase__ = ['past_key_values']
lowerCAmelCase__ = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self , UpperCAmelCase=50257 , UpperCAmelCase=2048 , UpperCAmelCase=2048 , UpperCAmelCase=24 , UpperCAmelCase=[[["global", "local"], 12]] , UpperCAmelCase=16 , UpperCAmelCase=None , UpperCAmelCase=256 , UpperCAmelCase="gelu_new" , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=1e-5 , UpperCAmelCase=0.02 , UpperCAmelCase=True , UpperCAmelCase=50256 , UpperCAmelCase=50256 , **UpperCAmelCase , ):
'''simple docstring'''
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = hidden_size
_lowercase = num_layers
_lowercase = num_heads
_lowercase = intermediate_size
_lowercase = window_size
_lowercase = activation_function
_lowercase = resid_dropout
_lowercase = embed_dropout
_lowercase = attention_dropout
_lowercase = classifier_dropout
_lowercase = layer_norm_epsilon
_lowercase = initializer_range
_lowercase = use_cache
_lowercase = bos_token_id
_lowercase = eos_token_id
_lowercase = attention_types
_lowercase = self.expand_attention_types_params(UpperCAmelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
@staticmethod
def _UpperCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
_lowercase = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def __lowerCAmelCase ( _A ,_A ,_A ,_A ):
"""simple docstring"""
import torch
_lowercase = input.size()
_lowercase = len(_A )
_lowercase = shape[dimension]
_lowercase = torch.arange(0 ,_A ,_A )
_lowercase = torch.div(sizedim - size ,_A ,rounding_mode="""floor""" ) + 1
_lowercase = torch.arange(_A ) + low_indices[:min_length][:, None]
_lowercase = [slice(_A )] * rank
_lowercase = indices
_lowercase = input[s]
_lowercase = list(range(0 ,rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(_A )
def __lowerCAmelCase ( _A ,_A ):
"""simple docstring"""
import torch
_lowercase = torch.arange(1 ,_A )
_lowercase = torch.remainder(_A ,_A )
_lowercase = remainders == 0
_lowercase = candidates[divisor_indices]
_lowercase = torch.max(_A )
return largest_divisor, torch.div(_A ,_A ,rounding_mode="""floor""" )
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase , direction="""inputs""" )
_lowercase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
_lowercase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return self._config.num_heads
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ):
'''simple docstring'''
_lowercase = super(UpperCAmelCase , self ).generate_dummy_inputs(
UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
_lowercase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowercase , _lowercase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_lowercase = seqlen + 2
_lowercase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowercase = [
(torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) for _ in range(self.num_layers )
]
_lowercase = common_inputs["""attention_mask"""]
if self.use_past:
_lowercase = ordered_inputs["""attention_mask"""].dtype
_lowercase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(UpperCAmelCase , UpperCAmelCase , dtype=UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return 13
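# Illustrative sketch of how `attention_types` expands into one entry per layer
# (mirrors the static helper above); the sample value below is made up.
def expand_attention_types_sketch(attention_types):
    attentions = []
    for pattern, repeat in attention_types:
        for _ in range(repeat):
            attentions.extend(pattern)
    return attentions
# expand_attention_types_sketch([[["global", "local"], 2]])
# -> ["global", "local", "global", "local"], i.e. 4 layers of alternating attention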
| 398
| 1
|
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> float:
'''simple docstring'''
_validate_point(__UpperCAmelCase )
_validate_point(__UpperCAmelCase )
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(__UpperCAmelCase , __UpperCAmelCase ) ) )
def __magic_name__ ( __UpperCAmelCase ) -> None:
'''simple docstring'''
if point:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
for item in point:
if not isinstance(__UpperCAmelCase , (int, float) ):
__SCREAMING_SNAKE_CASE = (
"""Expected a list of numbers as input, found """
f"""{type(__UpperCAmelCase ).__name__}"""
)
raise TypeError(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = f"""Expected a list of numbers as input, found {type(__UpperCAmelCase ).__name__}"""
raise TypeError(__UpperCAmelCase )
else:
raise ValueError("""Missing an input""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> float:
'''simple docstring'''
_validate_point(__UpperCAmelCase )
_validate_point(__UpperCAmelCase )
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(__UpperCAmelCase , __UpperCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
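# Worked example (values made up): for the points (1, 1) and (4, 5) both helpers
# above return |1 - 4| + |1 - 5| = 3 + 4 = 7.0.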
| 715
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __a ( unittest.TestCase ):
def __init__( self : Optional[int] ,lowerCamelCase : str ,lowerCamelCase : List[str]=13 ,lowerCamelCase : Optional[Any]=30 ,lowerCamelCase : Dict=2 ,lowerCamelCase : List[Any]=3 ,lowerCamelCase : List[str]=True ,lowerCamelCase : str=True ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Dict=5 ,lowerCamelCase : Optional[int]=4 ,lowerCamelCase : List[Any]=37 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Any=0.1 ,lowerCamelCase : str=10 ,lowerCamelCase : Dict=0.02 ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = num_patches + 1
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase ,initializer_range=self.initializer_range ,)
return config, pixel_values
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModel(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
__SCREAMING_SNAKE_CASE = (self.patch_size, self.patch_size)
__SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(lowerCamelCase )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,has_text_modality=lowerCamelCase ,hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
@jax.jit
def model_jitted(lowerCamelCase : int ,**lowerCamelCase : Union[str, Any] ):
return model(pixel_values=lowerCamelCase ,**lowerCamelCase )
with self.subTest("""JIT Enabled""" ):
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) ,len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase ,lowerCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
__SCREAMING_SNAKE_CASE = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowerCamelCase )
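# Hedged worked note: with the tester defaults above (image_size=30, patch_size=2)
# the ViT sequence length is (30 // 2) ** 2 + 1 = 226, i.e. 225 patches plus the
# [CLS] token.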
| 13
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Tuple , A__ : int ) -> str:
'''simple docstring'''
a__ : List[Any] = size
# approximate the overall size of segment tree with given value
a__ : Dict = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
a__ : Optional[Any] = [0 for i in range(0 , 4 * size )]
a__ : Union[str, Any] = [0 for i in range(0 , 4 * size )] # flag for lazy update
def __lowerCAmelCase ( self : int , A__ : int ) -> Tuple:
'''simple docstring'''
return idx * 2
def __lowerCAmelCase ( self : str , A__ : int ) -> Tuple:
'''simple docstring'''
return idx * 2 + 1
def __lowerCAmelCase ( self : int , A__ : int , A__ : int , A__ : int , A__ : list[int] ) -> Optional[Any]:
'''simple docstring'''
if left_element == right_element:
a__ : Optional[Any] = a[left_element - 1]
else:
a__ : Union[str, Any] = (left_element + right_element) // 2
self.build(self.left(a__ ) , a__ , a__ , a__ )
self.build(self.right(a__ ) , mid + 1 , a__ , a__ )
a__ : Optional[int] = max(
self.segment_tree[self.left(a__ )] , self.segment_tree[self.right(a__ )] )
def __lowerCAmelCase ( self : Any , A__ : int , A__ : int , A__ : int , A__ : int , A__ : int , A__ : int ) -> Optional[Any]:
'''simple docstring'''
if self.flag[idx] is True:
a__ : List[Any] = self.lazy[idx]
a__ : List[str] = False
if left_element != right_element:
a__ : Dict = self.lazy[idx]
a__ : int = self.lazy[idx]
a__ : Tuple = True
a__ : Tuple = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
a__ : Optional[Any] = val
if left_element != right_element:
a__ : List[Any] = val
a__ : Optional[Any] = val
a__ : List[str] = True
a__ : Tuple = True
return True
a__ : Union[str, Any] = (left_element + right_element) // 2
self.update(self.left(a__ ) , a__ , a__ , a__ , a__ , a__ )
self.update(self.right(a__ ) , mid + 1 , a__ , a__ , a__ , a__ )
a__ : Dict = max(
self.segment_tree[self.left(a__ )] , self.segment_tree[self.right(a__ )] )
return True
def __lowerCAmelCase ( self : List[Any] , A__ : int , A__ : int , A__ : int , A__ : int , A__ : int ) -> Dict:
'''simple docstring'''
if self.flag[idx] is True:
a__ : Tuple = self.lazy[idx]
a__ : int = False
if left_element != right_element:
a__ : List[Any] = self.lazy[idx]
a__ : Any = self.lazy[idx]
a__ : List[str] = True
a__ : int = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
a__ : List[Any] = (left_element + right_element) // 2
a__ : str = self.query(self.left(a__ ) , a__ , a__ , a__ , a__ )
a__ : List[Any] = self.query(self.right(a__ ) , mid + 1 , a__ , a__ , a__ )
return max(a__ , a__ )
def __str__( self : Optional[int] ) -> str:
'''simple docstring'''
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
__SCREAMING_SNAKE_CASE = 1_5
__SCREAMING_SNAKE_CASE = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt)
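# Illustrative, self-contained sketch of the same idea without lazy propagation:
# a plain max segment tree, to make the build/query recursion easier to read.
# The class below is a hypothetical addition, unrelated to the class above.
class SimpleMaxSegmentTree:
    def __init__(self, data: list[int]) -> None:
        self.n = len(data)
        self.tree = [float("-inf")] * (4 * self.n)
        self._build(1, 0, self.n - 1, data)

    def _build(self, node: int, lo: int, hi: int, data: list[int]) -> None:
        if lo == hi:
            self.tree[node] = data[lo]
            return
        mid = (lo + hi) // 2
        self._build(2 * node, lo, mid, data)
        self._build(2 * node + 1, mid + 1, hi, data)
        self.tree[node] = max(self.tree[2 * node], self.tree[2 * node + 1])

    def query(self, left: int, right: int) -> float:
        # maximum of data[left .. right], inclusive, 0-indexed
        return self._query(1, 0, self.n - 1, left, right)

    def _query(self, node: int, lo: int, hi: int, left: int, right: int) -> float:
        if right < lo or hi < left:
            return float("-inf")
        if left <= lo and hi <= right:
            return self.tree[node]
        mid = (lo + hi) // 2
        return max(
            self._query(2 * node, lo, mid, left, right),
            self._query(2 * node + 1, mid + 1, hi, left, right),
        )
# SimpleMaxSegmentTree([1, 2, -4, 7, 3]).query(1, 3) == 7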
| 688
|
from __future__ import annotations
def lowerCamelCase__ ( snake_case_ : list[float] , snake_case_ : list[float] ) -> float:
__snake_case = sorted(numsa + numsa )
__snake_case , __snake_case = divmod(len(snake_case_ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case_ = [float(x) for x in input('Enter the elements of first array: ').split()]
snake_case_ = [float(x) for x in input('Enter the elements of second array: ').split()]
print(F'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}')
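# Worked example (values made up): merging [1, 3] and [2] gives [1, 2, 3], an odd
# length, so the median is the middle element 2.0; merging [1, 2] and [3, 4]
# gives [1, 2, 3, 4], so the median is (2 + 3) / 2 = 2.5.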
| 592
| 0
|
'''simple docstring'''
import functools
from typing import Any
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> bool:
# Validation
if not isinstance(__A , __A ) or len(__A ) == 0:
raise ValueError('the string should be not empty string' )
if not isinstance(__A , __A ) or not all(
isinstance(__A , __A ) and len(__A ) > 0 for item in words ):
raise ValueError('the words should be a list of non-empty strings' )
# Build trie
_snake_case = {}
_snake_case = 'WORD_KEEPER'
for word in words:
_snake_case = trie
for c in word:
if c not in trie_node:
_snake_case = {}
_snake_case = trie_node[c]
_snake_case = True
_snake_case = len(__A )
# Dynamic programming method
@functools.cache
def is_breakable(__A ) -> bool:
if index == len_string:
return True
_snake_case = trie
for i in range(__A , __A ):
_snake_case = trie_node.get(string[i] , __A )
if trie_node is None:
return False
if trie_node.get(__A , __A ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 542
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase : str = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 542
| 1
|
"""simple docstring"""
import os
from distutils.util import strtobool
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
for e in env_keys:
lowerCamelCase__ =int(os.environ.get(__lowerCAmelCase , -1 ) )
if val >= 0:
return val
return default
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase=False ) -> List[str]:
'''simple docstring'''
lowerCamelCase__ =os.environ.get(__lowerCAmelCase , str(__lowerCAmelCase ) )
return strtobool(__lowerCAmelCase ) == 1 # As its name indicates `strtobool` actually returns an int...
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase="no" ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ =os.environ.get(__lowerCAmelCase , str(__lowerCAmelCase ) )
return value
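# Hedged, self-contained sketch of the boolean lookup the second helper above
# performs; the function name and env-var key are assumptions for the example.
def parse_flag_sketch(key: str, default: bool = False) -> bool:
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # strtobool returns 1/0 rather than True/False
# os.environ["EXAMPLE_DEBUG"] = "yes"; parse_flag_sketch("EXAMPLE_DEBUG") -> True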
| 530
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __UpperCAmelCase ( unittest.TestCase ):
A__ : List[Any] = StableDiffusionLDMaDPipeline
A__ : List[str] = TEXT_TO_IMAGE_PARAMS
A__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
A__ : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
def _a ( self ):
torch.manual_seed(0 )
lowerCamelCase__ =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowerCamelCase__ =DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , )
torch.manual_seed(0 )
lowerCamelCase__ =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCamelCase__ =CLIPTextModel(_lowerCamelCase )
lowerCamelCase__ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase__ ={
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _a ( self , _lowerCamelCase , _lowerCamelCase=0 ):
if str(_lowerCamelCase ).startswith("mps" ):
lowerCamelCase__ =torch.manual_seed(_lowerCamelCase )
else:
lowerCamelCase__ =torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
lowerCamelCase__ ={
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _a ( self ):
lowerCamelCase__ ="cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ =self.get_dummy_components()
lowerCamelCase__ =StableDiffusionLDMaDPipeline(**_lowerCamelCase )
lowerCamelCase__ =ldmad_pipe.to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCamelCase__ =self.get_dummy_inputs(_lowerCamelCase )
lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth
lowerCamelCase__ =rgb[0, -3:, -3:, -1]
lowerCamelCase__ =depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCamelCase__ =np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
lowerCamelCase__ =np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def _a ( self ):
lowerCamelCase__ =self.get_dummy_components()
lowerCamelCase__ =StableDiffusionLDMaDPipeline(**_lowerCamelCase )
lowerCamelCase__ =ldmad_pipe.to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCamelCase__ =self.get_dummy_inputs(_lowerCamelCase )
lowerCamelCase__ =3 * [inputs["prompt"]]
# forward
lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth
lowerCamelCase__ =rgb_slice_a[0, -3:, -3:, -1]
lowerCamelCase__ =depth_slice_a[0, -3:, -1]
lowerCamelCase__ =self.get_dummy_inputs(_lowerCamelCase )
lowerCamelCase__ =3 * [inputs.pop("prompt" )]
lowerCamelCase__ =ldmad_pipe.tokenizer(
_lowerCamelCase , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_lowerCamelCase , return_tensors="pt" , )
lowerCamelCase__ =text_inputs["input_ids"].to(_lowerCamelCase )
lowerCamelCase__ =ldmad_pipe.text_encoder(_lowerCamelCase )[0]
lowerCamelCase__ =prompt_embeds
# forward
lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth
lowerCamelCase__ =rgb_slice_a[0, -3:, -3:, -1]
lowerCamelCase__ =depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def _a ( self ):
lowerCamelCase__ ="cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ =self.get_dummy_components()
lowerCamelCase__ =PNDMScheduler(skip_prk_steps=_lowerCamelCase )
lowerCamelCase__ =StableDiffusionLDMaDPipeline(**_lowerCamelCase )
lowerCamelCase__ =ldmad_pipe.to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCamelCase__ =self.get_dummy_inputs(_lowerCamelCase )
lowerCamelCase__ ="french fries"
lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase , negative_prompt=_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth
lowerCamelCase__ =rgb[0, -3:, -3:, -1]
lowerCamelCase__ =depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCamelCase__ =np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
lowerCamelCase__ =np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
def _a ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _lowerCamelCase , _lowerCamelCase="cpu" , _lowerCamelCase=torch.floataa , _lowerCamelCase=0 ):
lowerCamelCase__ =torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
lowerCamelCase__ =np.random.RandomState(_lowerCamelCase ).standard_normal((1, 4, 64, 64) )
lowerCamelCase__ =torch.from_numpy(_lowerCamelCase ).to(device=_lowerCamelCase , dtype=_lowerCamelCase )
lowerCamelCase__ ={
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _a ( self ):
lowerCamelCase__ =StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
lowerCamelCase__ =ldmad_pipe.to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCamelCase__ =self.get_inputs(_lowerCamelCase )
lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth
lowerCamelCase__ =rgb[0, -3:, -3:, -1].flatten()
lowerCamelCase__ =rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
lowerCamelCase__ =np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
lowerCamelCase__ =np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
def _a ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _lowerCamelCase , _lowerCamelCase="cpu" , _lowerCamelCase=torch.floataa , _lowerCamelCase=0 ):
lowerCamelCase__ =torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
lowerCamelCase__ =np.random.RandomState(_lowerCamelCase ).standard_normal((1, 4, 64, 64) )
lowerCamelCase__ =torch.from_numpy(_lowerCamelCase ).to(device=_lowerCamelCase , dtype=_lowerCamelCase )
lowerCamelCase__ ={
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _a ( self ):
lowerCamelCase__ =StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCamelCase__ =self.get_inputs(_lowerCamelCase )
lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth
lowerCamelCase__ =0.4_9_5_5_8_6
lowerCamelCase__ =0.3_3_7_9_5_5_1_5
lowerCamelCase__ =1_1_2.4_8_5_1_8
lowerCamelCase__ =9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def _a ( self ):
lowerCamelCase__ =StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCamelCase__ =self.get_inputs(_lowerCamelCase )
lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth
lowerCamelCase__ =0.4_1_9_4_1_2_7
lowerCamelCase__ =0.3_5_3_7_5_5_8_6
lowerCamelCase__ =0.5_6_3_8_5_0_2
lowerCamelCase__ =0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 530
| 1
|
'''simple docstring'''
class a_ :
def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str ):
__snake_case = name
__snake_case = value
__snake_case = weight
def __repr__( self : Tuple ):
return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def lowercase__ ( self : Tuple ):
return self.value
def lowercase__ ( self : Any ):
return self.name
def lowercase__ ( self : Any ):
return self.weight
def lowercase__ ( self : Any ):
return self.value / self.weight
def lowerCamelCase__ ( a , a , a ):
__snake_case = []
for i in range(len(a ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def lowerCamelCase__ ( a , a , a ):
__snake_case = sorted(a , key=a , reverse=a )
__snake_case = []
__snake_case , __snake_case = 0.0, 0.0
for i in range(len(a ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def lowerCamelCase__ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
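# Hedged, self-contained sketch of the same greedy strategy on plain tuples,
# with a made-up example; all names below are hypothetical additions.
def greedy_sketch(items, max_cost, key):
    # items: list of (name, value, weight); key: ordering function, e.g. density
    chosen, total_value, total_cost = [], 0.0, 0.0
    for name, value, weight in sorted(items, key=key, reverse=True):
        if total_cost + weight <= max_cost:
            chosen.append(name)
            total_cost += weight
            total_value += value
    return chosen, total_value
# greedy_sketch([("a", 80, 40), ("b", 100, 60), ("c", 10, 10)], 100,
#               key=lambda t: t[1] / t[2]) == (["a", "b"], 180.0)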
| 427
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class a_ ( UpperCAmelCase__ ):
lowercase_ : Any = '''poolformer'''
def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Tuple=1_6 , __lowerCAmelCase : Union[str, Any]=1_6 , __lowerCAmelCase : int=3 , __lowerCAmelCase : List[str]=4.0 , __lowerCAmelCase : int=[2, 2, 6, 2] , __lowerCAmelCase : Union[str, Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , __lowerCAmelCase : int=[7, 3, 3, 3] , __lowerCAmelCase : Any=[4, 2, 2, 2] , __lowerCAmelCase : List[str]=[2, 1, 1, 1] , __lowerCAmelCase : Optional[Any]=4 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Union[str, Any]=1E-5 , __lowerCAmelCase : Dict=0.02 , **__lowerCAmelCase : Dict , ):
__snake_case = num_channels
__snake_case = patch_size
__snake_case = stride
__snake_case = padding
__snake_case = pool_size
__snake_case = hidden_sizes
__snake_case = mlp_ratio
__snake_case = depths
__snake_case = patch_sizes
__snake_case = strides
__snake_case = num_encoder_blocks
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_layer_scale
__snake_case = layer_scale_init_value
__snake_case = initializer_range
super().__init__(**__lowerCAmelCase )
class a_ ( UpperCAmelCase__ ):
lowercase_ : Dict = version.parse('''1.11''' )
@property
def lowercase__ ( self : str ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowercase__ ( self : Tuple ):
return 2E-3
| 427
| 1
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class lowerCamelCase__ ( pl.LightningModule ):
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ = model
UpperCamelCase__ = 2
UpperCamelCase__ = nn.Linear(self.model.config.hidden_size , self.num_labels )
def snake_case__ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_( _A :str , _A :str , _A :str )-> Optional[int]:
# load longformer model from model identifier
UpperCamelCase__ = LongformerModel.from_pretrained(UpperCAmelCase_ )
UpperCamelCase__ = LightningModel(UpperCAmelCase_ )
UpperCamelCase__ = torch.load(UpperCAmelCase_ , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
UpperCamelCase__ = LongformerForQuestionAnswering.from_pretrained(UpperCAmelCase_ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(UpperCAmelCase_ )
print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
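# Illustrative aside (toy example, not Longformer-specific): transferring weights
# between two modules of identical shape via state_dict, the same mechanism the
# conversion above relies on. The helper name below is hypothetical.
def _copy_linear_weights_sketch() -> None:
    src = nn.Linear(4, 2)
    dst = nn.Linear(4, 2)
    dst.load_state_dict(src.state_dict())  # copies weight and bias tensors
    assert torch.equal(src.weight, dst.weight)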
| 551
|
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = OpenAIGPTTokenizer
_lowerCamelCase = OpenAIGPTTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
lowerCamelCase_ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
lowerCamelCase_ = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(UpperCamelCase ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(UpperCamelCase ) )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
return "lower newer", "lower newer"
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase_ = "lower"
lowerCamelCase_ = ["low", "er</w>"]
lowerCamelCase_ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowerCamelCase_ = tokens + ["<unk>"]
lowerCamelCase_ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
def snake_case ( self , UpperCamelCase=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
# Simple input
lowerCamelCase_ = "This is a simple input"
lowerCamelCase_ = ["This is a simple input 1", "This is a simple input 2"]
lowerCamelCase_ = ("This is a simple input", "This is a pair")
lowerCamelCase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" , )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" , )
def snake_case ( self ):
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class snake_case ( lowercase ):
"""simple docstring"""
pass
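# Hedged worked note on the toy vocabulary above: BPE tokenizes "lower" as
# ["low", "er</w>"] by applying the merges l+o -> "lo", lo+w -> "low" and
# e+r</w> -> "er</w>", which is exactly what the test asserts.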
| 675
| 0
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    '''simple docstring'''
    # load the base model in fp32 so the merged LoRA deltas do not lose precision
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split('''.''')[0].split(lora_prefix_text_encoder + '''_''')[-1].split('''_''')
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('''.''')[0].split(lora_prefix_unet + '''_''')[-1].split('''_''')
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('''lora_down''', '''lora_up'''))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace('''lora_up''', '''lora_down'''))
        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)
        # update visited list
        for item in pair_keys:
            visited.append(item)
    return pipeline
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.7_5, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
args = parser.parse_args()
base_model_path = args.base_model_path
checkpoint_path = args.checkpoint_path
dump_path = args.dump_path
lora_prefix_unet = args.lora_prefix_unet
lora_prefix_text_encoder = args.lora_prefix_text_encoder
alpha = args.alpha
pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
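# Editor's example invocation (a sketch only; the script name and all paths below are
# placeholders and not taken from the original source):
# python convert_lora_safetensor_to_diffusers.py \
#     --base_model_path ./stable-diffusion-base \
#     --checkpoint_path ./lora_weights.safetensors \
#     --dump_path ./merged_pipeline --alpha 0.75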
| 716
|
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    '''simple docstring'''
    if principal <= 0:
        raise Exception('''Principal borrowed must be > 0''')
    if rate_per_annum < 0:
        raise Exception('''Rate of interest must be >= 0''')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('''Years to repay must be an integer > 0''')
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
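# Editor's usage sketch (not part of the original module); the figure is an approximation
# computed from the formula above, assuming the repaired function name:
# print(round(equated_monthly_installments(25_000, 0.12, 3), 2))  # roughly 830.36 per month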
| 250
| 0
|
def partition(m: int) -> int:
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        try:
            n = int(input('''Enter a number: ''').strip())
            print(partition(n))
        except ValueError:
            print('''Please enter a number.''')
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('''Please pass a number.''')
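# Editor's sanity-check sketch (not part of the original file; based on the repaired
# Project-Euler-style DP above): partition(5) counts the ways to write 5 as a sum of
# at least two positive integers, which should be 6.
# assert partition(5) == 6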
| 84
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    '''simple docstring'''
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1000000:
            break
    return n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 187
| 0
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """simple docstring"""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class __magic_name__ :
def UpperCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : float ) -> int:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = np.abs((a - b) ).max()
self.assertLessEqual(lowerCamelCase__ , lowerCamelCase__ , F"Difference between torch and flax is {diff} (>= {tol})." )
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any]=None , **lowerCamelCase__ : Dict ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : List[str] = FlaxVisionTextDualEncoderModel(lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Any , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple=None , **lowerCamelCase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Dict = self.get_vision_text_model(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCamelCase__ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase__ )
UpperCamelCase__ : List[str] = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : int , lowerCamelCase__ : int=None , **lowerCamelCase__ : List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : int = self.get_vision_text_model(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCamelCase__ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase__ )
UpperCamelCase__ : Dict = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ : str = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ : List[Any] = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ : Tuple = after_output[0]
UpperCamelCase__ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1E-3 )
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : Any=None , **lowerCamelCase__ : List[Any] ) -> Any:
'''simple docstring'''
UpperCamelCase__ : List[Any] = self.get_vision_text_model(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : int = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCamelCase__ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase__ )
UpperCamelCase__ : int = model(
input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_attentions=lowerCamelCase__ )
UpperCamelCase__ : str = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ : List[Any] = to_atuple(vision_model.config.image_size )
UpperCamelCase__ : Tuple = to_atuple(vision_model.config.patch_size )
UpperCamelCase__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCamelCase__ : Tuple = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCamelCase__ : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def UpperCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
pt_model.to(lowerCamelCase__ )
pt_model.eval()
# prepare inputs
UpperCamelCase__ : int = inputs_dict
UpperCamelCase__ : List[str] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
UpperCamelCase__ : Optional[int] = pt_model(**lowerCamelCase__ ).to_tuple()
UpperCamelCase__ : Optional[Any] = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase__ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
UpperCamelCase__ : Tuple = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase__ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ : List[str] = VisionTextDualEncoderModel.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ )
pt_model_loaded.to(lowerCamelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
UpperCamelCase__ : int = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCamelCase__ , pt_output_loaded.numpy() , 4E-2 )
def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Tuple = VisionTextDualEncoderModel(lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = FlaxVisionTextDualEncoderModel(lowerCamelCase__ )
UpperCamelCase__ : List[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ )
UpperCamelCase__ : Dict = fx_state
self.check_pt_flax_equivalence(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str] ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Tuple = VisionTextDualEncoderModel(lowerCamelCase__ )
UpperCamelCase__ : Any = FlaxVisionTextDualEncoderModel(lowerCamelCase__ )
UpperCamelCase__ : str = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params )
self.check_pt_flax_equivalence(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : int = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase__ )
def UpperCAmelCase__ ( self : int ) -> Any:
'''simple docstring'''
UpperCamelCase__ : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : str = self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase__ )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase__ )
@is_pt_flax_cross_test
def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : int = self.prepare_config_and_inputs()
UpperCamelCase__ : str = config_inputs_dict.pop('''vision_config''' )
UpperCamelCase__ : Any = config_inputs_dict.pop('''text_config''' )
UpperCamelCase__ : Optional[Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.check_equivalence_flax_to_pt(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.get_pretrained_model_and_inputs()
UpperCamelCase__ : Optional[int] = model_a(**lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ : List[str] = model_a(**lowerCamelCase__ )
UpperCamelCase__ : List[str] = after_outputs[0]
UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1E-5 )
@require_flax
class __magic_name__ ( __lowerCAmelCase , unittest.TestCase):
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCamelCase__ , text_from_pt=lowerCamelCase__ , )
UpperCamelCase__ : Dict = 13
UpperCamelCase__ : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCamelCase__ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCamelCase__ : Dict = random_attention_mask([batch_size, 4] )
UpperCamelCase__ : str = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : List[str] = FlaxViTModel(lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = FlaxBertModel(lowerCamelCase__ )
return vision_model, text_model
def UpperCAmelCase__ ( self : int ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : List[str] = FlaxViTModelTester(self )
UpperCamelCase__ : Optional[int] = FlaxBertModelTester(self )
UpperCamelCase__ : str = vit_model_tester.prepare_config_and_inputs()
UpperCamelCase__ : Tuple = bert_model_tester.prepare_config_and_inputs()
UpperCamelCase__ : Union[str, Any] = vision_config_and_inputs
UpperCamelCase__ : Tuple = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __magic_name__ ( __lowerCAmelCase , unittest.TestCase):
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCamelCase__ , text_from_pt=lowerCamelCase__ , )
UpperCamelCase__ : Any = 13
UpperCamelCase__ : Optional[int] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCamelCase__ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCamelCase__ : Union[str, Any] = random_attention_mask([batch_size, 4] )
UpperCamelCase__ : Any = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : Any = FlaxCLIPVisionModel(lowerCamelCase__ )
UpperCamelCase__ : Dict = FlaxBertModel(lowerCamelCase__ )
return vision_model, text_model
def UpperCAmelCase__ ( self : int ) -> str:
'''simple docstring'''
UpperCamelCase__ : int = FlaxCLIPVisionModelTester(self )
UpperCamelCase__ : Optional[Any] = FlaxBertModelTester(self )
UpperCamelCase__ : List[Any] = clip_model_tester.prepare_config_and_inputs()
UpperCamelCase__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
UpperCamelCase__ : List[str] = vision_config_and_inputs
UpperCamelCase__ : Tuple = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __magic_name__ ( unittest.TestCase):
@slow
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : int = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
UpperCamelCase__ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
UpperCamelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCamelCase__ : Optional[Any] = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors='''np''' )
UpperCamelCase__ : Optional[Any] = model(**lowerCamelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
UpperCamelCase__ : Union[str, Any] = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCamelCase__ , atol=1E-3 ) )
| 700
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( __lowerCAmelCase):
A: Optional[Any] = ["image_processor", "tokenizer"]
A: Optional[Any] = "LayoutLMv2ImageProcessor"
A: List[str] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Dict , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Any ) -> Optional[Any]:
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCamelCase__ , )
UpperCamelCase__ : str = kwargs.pop('''feature_extractor''' )
UpperCamelCase__ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __call__( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase__ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowerCamelCase__ : Union[List[List[int]], List[List[List[int]]]] = None , lowerCamelCase__ : Optional[Union[List[int], List[List[int]]]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : Any , ) -> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCamelCase__ : Optional[Any] = self.image_processor(images=lowerCamelCase__ , return_tensors=lowerCamelCase__ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase__ : Optional[int] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCamelCase__ : Optional[int] = features['''words''']
UpperCamelCase__ : Optional[int] = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
# add pixel values
UpperCamelCase__ : Optional[Any] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCamelCase__ : Union[str, Any] = self.get_overflowing_images(lowerCamelCase__ , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCamelCase__ : Tuple = images
return encoded_inputs
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ : int = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F" {len(lowerCamelCase__ )} and {len(lowerCamelCase__ )}" )
return images_with_overflow
def UpperCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : List[str] , *lowerCamelCase__ : int , **lowerCamelCase__ : str ) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
def UpperCAmelCase__ ( self : int ) -> str:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCamelCase__ , )
return self.image_processor_class
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCamelCase__ , )
return self.image_processor
| 106
| 0
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class __UpperCamelCase :
def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=19, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=16, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase=None, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_mask
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_act
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =num_labels
lowerCamelCase_ =num_choices
lowerCamelCase_ =scope
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase_ =None
if self.use_input_mask:
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase_ =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =EsmConfig(
vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=lowerCAmelCase, esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False}, )
return config
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =EsmForProteinFolding(config=lowerCAmelCase ).float()
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
(
(
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
),
) =config_and_inputs
lowerCamelCase_ ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Tuple =False
lowercase : str =(EsmForProteinFolding,) if is_torch_available() else ()
lowercase : Optional[Any] =()
lowercase : List[str] ={} if is_torch_available() else {}
lowercase : Tuple =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =EsmFoldModelTester(self )
lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, hidden_size=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
@unittest.skip('''Does not support attention outputs''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold only has one output format.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@require_torch
class __UpperCamelCase ( lowerCamelCase__ ):
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
lowerCamelCase_ =torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ =model(lowerCAmelCase )['''positions''']
lowerCamelCase_ =torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4], dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], lowerCAmelCase, atol=1e-4 ) )
| 676
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_lengths
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =gelu_activation
lowerCamelCase_ =sinusoidal_embeddings
lowerCamelCase_ =causal
lowerCamelCase_ =asm
lowerCamelCase_ =n_langs
lowerCamelCase_ =vocab_size
lowerCamelCase_ =n_special
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =num_labels
lowerCamelCase_ =num_choices
lowerCamelCase_ =summary_type
lowerCamelCase_ =use_proj
lowerCamelCase_ =scope
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =None
if self.use_input_lengths:
lowerCamelCase_ =(
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs )
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float()
lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase_ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self ):
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =FlaubertForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_choices
lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =model(
lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
(
(
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
),
) =config_and_inputs
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : List[Any] =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase : Tuple =(
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ):
"""simple docstring"""
lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
return inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModelTester(self )
lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@slow
@require_torch_gpu
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCamelCase_ =True
lowerCamelCase_ =model_class(config=lowerCAmelCase )
lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =torch.jit.trace(
lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) )
lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase )
loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase )[0]
lowerCamelCase_ =torch.Size((1, 11, 768) )
self.assertEqual(output.shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=1e-4 ) )
| 676
| 1
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : Optional[Any] ):
debug_launcher(test_script.main )
def lowerCAmelCase_ ( self : Any ):
debug_launcher(test_ops.main )
| 709
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("""--make-reports""")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 275
| 0
|
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count = defaultdict(int)
    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
input_a = input('''Enter the first string ''').strip()
input_b = input('''Enter the second string ''').strip()
status = check_anagrams(input_a, input_b)
print(f"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
| 360
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''')
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1])
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith('bias') and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], F'''no matches found for the following tf keys {extra}'''
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ['Adafactor', 'global_step']
    for name, shape in tqdm(init_vars, desc='converting tf checkpoint to dict'):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[F'''summarization_{dataset}''']['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus', model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[F'''summarization_{dataset}''']
    if dataset == "large":
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight')
    sd.pop('model.encoder.embed_positions.weight')
    torch.save(sd, Path(save_dir) / 'pytorch_model.bin')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
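

# Hypothetical usage sketch (added for illustration, not part of the original
# conversion script). It shows how a directory written by
# convert_pegasus_ckpt_to_pytorch could be loaded for summarization afterwards.
# The "save/pegasus_aeslc" path and the generation settings are assumptions.
def _example_use_converted_checkpoint(save_dir="save/pegasus_aeslc"):
    tok = PegasusTokenizer.from_pretrained(save_dir)
    model = PegasusForConditionalGeneration.from_pretrained(save_dir)
    batch = tok(["An example document to summarize."], truncation=True, return_tensors="pt")
    summary_ids = model.generate(**batch, num_beams=2, max_length=32)
    return tok.batch_decode(summary_ids, skip_special_tokens=True)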
| 360
| 1
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
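

# Illustrative sketch (added for clarity, not part of the launcher): the launcher
# imports the training script as a module and hands `mod._mp_fn` to xmp.spawn, so the
# training script must expose a module-level `_mp_fn(index)` entry point that is run
# once per TPU core. The body below is an assumption about what a minimal entry point
# could look like; it is not code shipped with the launcher.
def _example_mp_fn(index):
    import torch_xla.core.xla_model as xm  # imported lazily; only needed on a TPU host

    device = xm.xla_device()  # one TPU core per spawned process
    print(f"process {index} running on {device}")
    # build the model / dataloader and run the training loop here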
| 684
|
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
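

# Illustrative sketch (added for clarity, not part of the original solution): the same
# four-in-a-row product on a tiny made-up grid, written with an explicit window so the
# index arithmetic used above is easy to check by hand.
def _example_window_products() -> int:
    grid = [
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 10, 11, 12],
        [13, 14, 15, 16],
    ]
    best = 0
    for row in grid:  # horizontal windows of length 4 only
        product = row[0] * row[1] * row[2] * row[3]
        best = max(best, product)
    return best  # 13 * 14 * 15 * 16 = 43680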
| 684
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 342
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"
    def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs, ) -> None:
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 242
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : str = logging.get_logger(__name__)
lowerCAmelCase : int = {}
class LlamaConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__( self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs, ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self) -> None:
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
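

# Illustrative sketch (added for clarity, not part of the original module): the
# validation above accepts a two-field dict such as {"type": "linear", "factor": 2.0}.
# The sizes below are arbitrary toy values, not recommended settings.
def _example_rope_scaling_config():
    config = LlamaConfig(
        vocab_size=32000,
        hidden_size=512,
        num_hidden_layers=2,
        num_attention_heads=8,
        rope_scaling={"type": "linear", "factor": 2.0},
    )
    return config.rope_scaling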
| 704
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase):
'''simple docstring'''
_snake_case = IFImgaImgSuperResolutionPipeline
_snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''})
_snake_case = PipelineTesterMixin.required_optional_params - {'''latents'''}
def a__ ( self ) -> Optional[int]:
return self._get_superresolution_dummy_components()
def a__ ( self , a_ , a_=0 ) -> Union[str, Any]:
if str(a_ ).startswith("mps" ):
lowercase : Dict = torch.manual_seed(a_ )
else:
lowercase : Tuple = torch.Generator(device=a_ ).manual_seed(a_ )
lowercase : Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(a_ ) ).to(a_ )
lowercase : str = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(a_ ) ).to(a_ )
lowercase : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def a__ ( self ) -> str:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def a__ ( self ) -> List[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def a__ ( self ) -> Dict:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def a__ ( self ) -> str:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def a__ ( self ) -> List[Any]:
self._test_save_load_local()
def a__ ( self ) -> List[str]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 425
| 0
|
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def _lowerCamelCase ( UpperCAmelCase_ : Union[str, Any], UpperCAmelCase_ : List[str], UpperCAmelCase_ : Optional[int], UpperCAmelCase_ : List[Any], UpperCAmelCase_ : int ) -> Optional[Any]:
"""simple docstring"""
A__ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_, torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
A__ = load_file(UpperCAmelCase_ )
A__ = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
A__ = key.split("." )[0].split(LORA_PREFIX_TEXT_ENCODER + "_" )[-1].split("_" )
A__ = pipeline.text_encoder
else:
A__ = key.split("." )[0].split(LORA_PREFIX_UNET + "_" )[-1].split("_" )
A__ = pipeline.unet
# find the target layer
A__ = layer_infos.pop(0 )
while len(UpperCAmelCase_ ) > -1:
try:
A__ = curr_layer.__getattr__(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
A__ = layer_infos.pop(0 )
elif len(UpperCAmelCase_ ) == 0:
break
except Exception:
if len(UpperCAmelCase_ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
A__ = layer_infos.pop(0 )
A__ = []
if "lora_down" in key:
pair_keys.append(key.replace("lora_down", "lora_up" ) )
pair_keys.append(UpperCAmelCase_ )
else:
pair_keys.append(UpperCAmelCase_ )
pair_keys.append(key.replace("lora_up", "lora_down" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
A__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
A__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase_, UpperCAmelCase_ ).unsqueeze(2 ).unsqueeze(3 )
else:
A__ = state_dict[pair_keys[0]].to(torch.floataa )
A__ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase_, UpperCAmelCase_ )
# update visited list
for item in pair_keys:
visited.append(UpperCAmelCase_ )
return pipeline
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
UpperCamelCase = parser.parse_args()
UpperCamelCase = args.base_model_path
UpperCamelCase = args.checkpoint_path
UpperCamelCase = args.dump_path
UpperCamelCase = args.lora_prefix_unet
UpperCamelCase = args.lora_prefix_text_encoder
UpperCamelCase = args.alpha
UpperCamelCase = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
UpperCamelCase = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
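

# Illustrative sketch (added for clarity, not part of the original script): the merge
# performed above is the low-rank update W <- W0 + alpha * (up @ down), as described in
# the --alpha help text. The shapes, rank, and alpha below are made-up toy values.
def _example_lora_merge(alpha=0.75, out_features=8, in_features=8, rank=2):
    w0 = torch.zeros(out_features, in_features)
    lora_up = torch.randn(out_features, rank)
    lora_down = torch.randn(rank, in_features)
    merged = w0 + alpha * torch.mm(lora_up, lora_down)
    return merged.shape  # torch.Size([8, 8])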
| 104
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class UpperCamelCase( _a , unittest.TestCase ):
snake_case_ : Any = PriorTransformer
snake_case_ : List[str] = """hidden_states"""
@property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
'''simple docstring'''
__snake_case = 4
__snake_case = 8
__snake_case = 7
__snake_case = floats_tensor((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE )
__snake_case = floats_tensor((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE )
__snake_case = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE : Optional[Any]=0 ) -> Any:
'''simple docstring'''
torch.manual_seed(SCREAMING_SNAKE_CASE )
__snake_case = 4
__snake_case = 8
__snake_case = 7
__snake_case = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE )
__snake_case = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE )
__snake_case = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
'''simple docstring'''
return (4, 8)
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
'''simple docstring'''
return (4, 8)
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Dict:
'''simple docstring'''
__snake_case = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Dict:
'''simple docstring'''
__snake_case , __snake_case = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy" , output_loading_info=SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(SCREAMING_SNAKE_CASE )
__snake_case = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
__snake_case , __snake_case = self.prepare_init_args_and_inputs_for_common()
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2] , SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
__snake_case = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
__snake_case = model.to(SCREAMING_SNAKE_CASE )
if hasattr(SCREAMING_SNAKE_CASE , "set_default_attn_processor" ):
model.set_default_attn_processor()
__snake_case = self.get_dummy_seed_input()
with torch.no_grad():
__snake_case = model(**SCREAMING_SNAKE_CASE )[0]
__snake_case = output[0, :5].flatten().cpu()
print(SCREAMING_SNAKE_CASE )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
__snake_case = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , rtol=1e-2 ) )
@slow
class UpperCamelCase( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE : int=1 , SCREAMING_SNAKE_CASE : str=7_6_8 , SCREAMING_SNAKE_CASE : Dict=7_7 , SCREAMING_SNAKE_CASE : int=0 ) -> List[str]:
'''simple docstring'''
torch.manual_seed(SCREAMING_SNAKE_CASE )
__snake_case = batch_size
__snake_case = embedding_dim
__snake_case = num_embeddings
__snake_case = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE )
__snake_case = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE )
__snake_case = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[3_7, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
'''simple docstring'''
__snake_case = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" )
model.to(SCREAMING_SNAKE_CASE )
__snake_case = self.get_dummy_seed_input(seed=SCREAMING_SNAKE_CASE )
with torch.no_grad():
__snake_case = model(**SCREAMING_SNAKE_CASE )[0]
assert list(sample.shape ) == [1, 7_6_8]
__snake_case = sample[0, :8].flatten().cpu()
print(SCREAMING_SNAKE_CASE )
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE )
assert torch_all_close(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 )
| 371
| 0
|
_UpperCamelCase : Union[str, Any] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCamelCase : Optional[int] = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCamelCase : List[str] = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Return the name of the week day for a given date, using the Doomsday rule."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
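

# Illustrative check (added for clarity, not part of the original module): 24 October
# 2020 fell on a Saturday, which the Doomsday computation above reproduces.
def _example_week_day() -> str:
    result = get_week_day(2020, 10, 24)
    assert result == "Saturday"
    return result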
| 341
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100, ) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x: float) -> float:
        '''simple docstring'''
        return math.sin(10 * x)
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
    i = 10
while i <= 1_0_0_0_0_0:
print(F"""With {i} steps: {line_length(f, -1_0, 1_0, i)}""")
i *= 1_0
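

# Illustrative check (added for clarity, not part of the original module): for a
# straight line the piecewise-linear approximation is exact, so f(x) = x on [0, 1]
# should give sqrt(2) regardless of the number of steps.
def _example_straight_line_check() -> bool:
    approx = line_length(lambda x: x, 0, 1, steps=10)
    return math.isclose(approx, math.sqrt(2), rel_tol=1e-9)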
| 341
| 1
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = ['model.decoder.embed_positions.weights']
def _UpperCAmelCase (UpperCamelCase__ : List[Any] ):
if "emb" in name:
_A : Dict = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
_A : str = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
_A : int = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
_A : List[Any] = name.replace("linear1" , "fc1" )
if "linear2" in name:
_A : List[Any] = name.replace("linear2" , "fc2" )
if "norm1" in name:
_A : Tuple = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
_A : Tuple = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
_A : str = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
_A : List[Any] = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
_A : Optional[int] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_A : Tuple = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def _UpperCAmelCase (UpperCamelCase__ : OrderedDict , UpperCamelCase__ : int ):
_A : Optional[int] = list(state_dict.keys() )
_A : List[str] = {}
for key in keys:
_A : Tuple = state_dict.pop(UpperCamelCase__ )
_A : List[str] = rename_keys(UpperCamelCase__ )
if "in_proj_weight" in key:
# split fused qkv proj
_A : int = val[:hidden_size, :]
_A : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_A : Dict = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_A : Tuple = val
else:
_A : Optional[Any] = val
return state_dict, enc_dec_proj_state_dict
def _UpperCAmelCase (UpperCamelCase__ : str ):
if checkpoint == "small":
# default config values
_A : Tuple = 1024
_A : Dict = 24
_A : Dict = 16
elif checkpoint == "medium":
_A : Optional[Any] = 1536
_A : Optional[Any] = 48
_A : Any = 24
elif checkpoint == "large":
_A : Optional[int] = 2048
_A : str = 48
_A : List[str] = 32
else:
raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
_A : Tuple = MusicgenDecoderConfig(
hidden_size=UpperCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , )
return config
@torch.no_grad()
def _UpperCAmelCase (UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int="cpu" ):
_A : Dict = MusicGen.get_pretrained(UpperCamelCase__ , device=UpperCamelCase__ )
_A : List[str] = decoder_config_from_checkpoint(UpperCamelCase__ )
_A : Dict = fairseq_model.lm.state_dict()
_A , _A : List[Any] = rename_state_dict(
UpperCamelCase__ , hidden_size=decoder_config.hidden_size )
_A : int = TaEncoderModel.from_pretrained("t5-base" )
_A : Optional[int] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_A : List[Any] = MusicgenForCausalLM(UpperCamelCase__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_A , _A : Optional[int] = decoder.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
raise ValueError(f"Missing key(s) in state_dict: {missing_keys}" )
if len(UpperCamelCase__ ) > 0:
raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
_A : Optional[int] = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase__ , audio_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCamelCase__ )
# check we can do a forward pass
_A : int = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
_A : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
_A : Any = model(input_ids=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_A : Any = AutoTokenizer.from_pretrained("t5-base" )
_A : Dict = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
_A : Tuple = MusicgenProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# set the appropriate bos/pad token ids
_A : Any = 2048
_A : Any = 2048
# set other default generation config params
_A : List[Any] = int(30 * audio_encoder.config.frame_rate )
_A : Optional[Any] = True
_A : Any = 3.0
if pytorch_dump_folder is not None:
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if repo_id:
logger.info(f"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(UpperCamelCase__ )
processor.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
lowerCAmelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 503
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : str ):
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
_A : Any = tmp_path / "cache"
_A : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_A : List[str] = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ):
_A : Dict = tmp_path / "cache"
_A : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_A : Optional[Any] = features.copy() if features else default_expected_features
_A : Union[str, Any] = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_A : int = ParquetDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ):
_A : Any = tmp_path / "cache"
_A : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_A : List[str] = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , split=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _UpperCAmelCase (UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
if issubclass(UpperCamelCase__ , UpperCamelCase__ ):
_A : Optional[int] = parquet_path
elif issubclass(UpperCamelCase__ , UpperCamelCase__ ):
_A : Optional[int] = [parquet_path]
_A : Dict = tmp_path / "cache"
_A : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_A : Tuple = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any]=("train",) ):
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
for split in splits:
_A : List[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
_A : Tuple = tmp_path / "cache"
_A : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_A : List[str] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] ):
_A : Optional[int] = tmp_path / "cache"
_A : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_A : str = features.copy() if features else default_expected_features
_A : Any = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_A : int = ParquetDatasetReader({"train": parquet_path} , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if split:
_A : Any = {split: parquet_path}
else:
_A : Optional[Any] = "train"
_A : int = {"train": parquet_path, "test": parquet_path}
_A : Any = tmp_path / "cache"
_A : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_A : Dict = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _UpperCAmelCase (UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
_A : Union[str, Any] = ParquetDatasetWriter(UpperCamelCase__ , tmp_path / "foo.parquet" )
assert writer.write() > 0
_A : List[Any] = pq.ParquetFile(tmp_path / "foo.parquet" )
_A : List[str] = pf.read()
assert dataset.data.table == output_table
def _UpperCAmelCase (UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
_A : Any = str(shared_datadir / "test_image_rgb.jpg" )
_A : Dict = {"image": [image_path]}
_A : Union[str, Any] = Features({"image": Image()} )
_A : List[Any] = Dataset.from_dict(UpperCamelCase__ , features=UpperCamelCase__ )
_A : Any = ParquetDatasetWriter(UpperCamelCase__ , tmp_path / "foo.parquet" )
assert writer.write() > 0
_A : Optional[int] = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
_A : Union[str, Any] = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=UpperCamelCase__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
assert get_writer_batch_size(UpperCamelCase__ ) == expected
| 503
| 1
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
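

# Illustrative sketch (added for clarity, not one of the original tests): FileLock is
# reentrant within a process, and the lock is only released once the outermost context
# manager exits. The `is_locked` property is assumed to behave as in the upstream
# filelock package that this module vendors.
def test_filelock_reentrant_sketch(tmpdir):
    lock = FileLock(str(tmpdir / "bar.lock"))
    with lock.acquire():
        with lock.acquire():  # re-acquiring in the same process does not block
            assert lock.is_locked
    assert not lock.is_locked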
| 338
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Optional[Any] = batch_size
__A : Union[str, Any] = image_size
__A : Optional[int] = patch_size
__A : int = num_channels
__A : int = is_training
__A : List[Any] = use_labels
__A : Optional[int] = hidden_size
__A : Union[str, Any] = num_hidden_layers
__A : Optional[Any] = num_attention_heads
__A : List[str] = intermediate_size
__A : Any = hidden_act
__A : Optional[Any] = hidden_dropout_prob
__A : List[str] = attention_probs_dropout_prob
__A : Union[str, Any] = type_sequence_label_size
__A : Optional[int] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__A : Optional[int] = (image_size // patch_size) ** 2
__A : Dict = num_patches + 1
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__A : str = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, pixel_values
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = FlaxViTModel(config=_UpperCAmelCase)
__A : Any = model(_UpperCAmelCase)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__A : int = (self.image_size, self.image_size)
__A : Any = (self.patch_size, self.patch_size)
__A : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = self.type_sequence_label_size
__A : List[Any] = FlaxViTForImageClassification(config=_UpperCAmelCase)
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
__A : List[str] = 1
__A : Optional[int] = FlaxViTForImageClassification(_UpperCAmelCase)
__A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__A : Optional[int] = model(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.prepare_config_and_inputs()
(
(
__A
) ,(
__A
) ,
) : Union[str, Any] = config_and_inputs
__A : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = FlaxViTModelTester(self)
__A : Union[str, Any] = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : int = model_class(_UpperCAmelCase)
__A : Optional[int] = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Dict = [*signature.parameters.keys()]
__A : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
__A : str = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[Any] = model_class(_UpperCAmelCase)
@jax.jit
def model_jitted(_UpperCAmelCase , **_UpperCAmelCase):
return model(pixel_values=_UpperCAmelCase , **_UpperCAmelCase)
with self.subTest('JIT Enabled'):
__A : Optional[Any] = model_jitted(**_UpperCAmelCase).to_tuple()
with self.subTest('JIT Disabled'):
with jax.disable_jit():
__A : Optional[Any] = model_jitted(**_UpperCAmelCase).to_tuple()
self.assertEqual(len(_UpperCAmelCase) , len(_UpperCAmelCase))
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__A : Optional[Any] = model_class_name.from_pretrained('google/vit-base-patch16-224')
__A : Dict = model(np.ones((1, 3, 224, 224)))
self.assertIsNotNone(_UpperCAmelCase)
| 338
| 1
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''efficientnet'''
def __init__( self , _a = 3 , _a = 600 , _a = 2.0 , _a = 3.1 , _a = 8 , _a = [3, 3, 5, 3, 5, 5, 3] , _a = [32, 16, 24, 40, 80, 112, 192] , _a = [16, 24, 40, 80, 112, 192, 320] , _a = [] , _a = [1, 2, 2, 2, 1, 2, 1] , _a = [1, 2, 2, 3, 3, 4, 1] , _a = [1, 6, 6, 6, 6, 6, 6] , _a = 0.2_5 , _a = "swish" , _a = 2560 , _a = "mean" , _a = 0.0_2 , _a = 0.0_0_1 , _a = 0.9_9 , _a = 0.5 , _a = 0.2 , **_a , ) -> List[str]:
super().__init__(**_a )
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = image_size
lowerCAmelCase_ = width_coefficient
lowerCAmelCase_ = depth_coefficient
lowerCAmelCase_ = depth_divisor
lowerCAmelCase_ = kernel_sizes
lowerCAmelCase_ = in_channels
lowerCAmelCase_ = out_channels
lowerCAmelCase_ = depthwise_padding
lowerCAmelCase_ = strides
lowerCAmelCase_ = num_block_repeats
lowerCAmelCase_ = expand_ratios
lowerCAmelCase_ = squeeze_expansion_ratio
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dim
lowerCAmelCase_ = pooling_type
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = batch_norm_eps
lowerCAmelCase_ = batch_norm_momentum
lowerCAmelCase_ = dropout_rate
lowerCAmelCase_ = drop_connect_rate
lowerCAmelCase_ = sum(_a ) * 4
class __magic_name__ (__lowercase ):
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __a ( self ) -> float:
return 1E-5
| 122
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __magic_name__ (unittest.TestCase ):
lowerCamelCase__ = StableDiffusionLDMaDPipeline
lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def __a ( self ) -> List[Any]:
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowerCAmelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
lowerCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ = CLIPTextModel(_a )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __a ( self , _a , _a=0 ) -> Any:
if str(_a ).startswith("mps" ):
lowerCAmelCase_ = torch.manual_seed(_a )
else:
lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a )
lowerCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> List[str]:
lowerCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = StableDiffusionLDMaDPipeline(**_a )
lowerCAmelCase_ = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_dummy_inputs(_a )
lowerCAmelCase_ = ldmad_pipe(**_a )
lowerCAmelCase_ , lowerCAmelCase_ = output.rgb, output.depth
lowerCAmelCase_ = rgb[0, -3:, -3:, -1]
lowerCAmelCase_ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCAmelCase_ = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
lowerCAmelCase_ = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def __a ( self ) -> Tuple:
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = StableDiffusionLDMaDPipeline(**_a )
lowerCAmelCase_ = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_dummy_inputs(_a )
lowerCAmelCase_ = 3 * [inputs["prompt"]]
# forward
lowerCAmelCase_ = ldmad_pipe(**_a )
lowerCAmelCase_ , lowerCAmelCase_ = output.rgb, output.depth
lowerCAmelCase_ = rgb_slice_a[0, -3:, -3:, -1]
lowerCAmelCase_ = depth_slice_a[0, -3:, -1]
lowerCAmelCase_ = self.get_dummy_inputs(_a )
lowerCAmelCase_ = 3 * [inputs.pop("prompt" )]
lowerCAmelCase_ = ldmad_pipe.tokenizer(
_a , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_a , return_tensors="pt" , )
lowerCAmelCase_ = text_inputs["input_ids"].to(_a )
lowerCAmelCase_ = ldmad_pipe.text_encoder(_a )[0]
lowerCAmelCase_ = prompt_embeds
# forward
lowerCAmelCase_ = ldmad_pipe(**_a )
lowerCAmelCase_ , lowerCAmelCase_ = output.rgb, output.depth
lowerCAmelCase_ = rgb_slice_a[0, -3:, -3:, -1]
lowerCAmelCase_ = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=_a )
lowerCAmelCase_ = StableDiffusionLDMaDPipeline(**_a )
lowerCAmelCase_ = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_dummy_inputs(_a )
lowerCAmelCase_ = "french fries"
lowerCAmelCase_ = ldmad_pipe(**_a , negative_prompt=_a )
lowerCAmelCase_ , lowerCAmelCase_ = output.rgb, output.depth
lowerCAmelCase_ = rgb[0, -3:, -3:, -1]
lowerCAmelCase_ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCAmelCase_ = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
lowerCAmelCase_ = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
def __a ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ) -> Dict:
lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a )
lowerCAmelCase_ = np.random.RandomState(_a ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ = torch.from_numpy(_a ).to(device=_a , dtype=_a )
lowerCAmelCase_ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
lowerCAmelCase_ = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_inputs(_a )
lowerCAmelCase_ = ldmad_pipe(**_a )
lowerCAmelCase_ , lowerCAmelCase_ = output.rgb, output.depth
lowerCAmelCase_ = rgb[0, -3:, -3:, -1].flatten()
lowerCAmelCase_ = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
lowerCAmelCase_ = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
lowerCAmelCase_ = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
def __a ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ) -> Union[str, Any]:
lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a )
lowerCAmelCase_ = np.random.RandomState(_a ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ = torch.from_numpy(_a ).to(device=_a , dtype=_a )
lowerCAmelCase_ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> str:
lowerCAmelCase_ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_inputs(_a )
lowerCAmelCase_ = ldmad_pipe(**_a )
lowerCAmelCase_ , lowerCAmelCase_ = output.rgb, output.depth
lowerCAmelCase_ = 0.4_9_5_5_8_6
lowerCAmelCase_ = 0.3_3_7_9_5_5_1_5
lowerCAmelCase_ = 1_1_2.4_8_5_1_8
lowerCAmelCase_ = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def __a ( self ) -> Dict:
lowerCAmelCase_ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_inputs(_a )
lowerCAmelCase_ = ldmad_pipe(**_a )
lowerCAmelCase_ , lowerCAmelCase_ = output.rgb, output.depth
lowerCAmelCase_ = 0.4_1_9_4_1_2_7
lowerCAmelCase_ = 0.3_5_3_7_5_5_8_6
lowerCAmelCase_ = 0.5_6_3_8_5_0_2
lowerCAmelCase_ = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 122
| 1
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch model and save it."""
    # Initialise PyTorch model from the config file
    config = BertConfig.from_json_file(bert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
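

# Hypothetical usage sketch (added for illustration, not part of the original script):
# the file written above is a bare state_dict, so reloading it requires pairing it with
# the same config file. The two default paths are assumptions, not defaults of the script.
def _example_reload_converted_model(bert_config_file="bert_config.json", pytorch_dump_path="pytorch_model.bin"):
    config = BertConfig.from_json_file(bert_config_file)
    model = BertForPreTraining(config)
    model.load_state_dict(torch.load(pytorch_dump_path, map_location="cpu"))
    model.eval()
    return model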
| 704
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_a : str = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = ["""YolosFeatureExtractor"""]
_a : List[Any] = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
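# Note on the pattern above: at import time only the import-structure dict of plain strings is
# built; _LazyModule defers the heavy torch/vision imports until an attribute is first accessed,
# while the `if TYPE_CHECKING:` branch still gives static type checkers the real symbols.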
| 87
| 0
|
"""simple docstring"""
import os
def solution() -> int:
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , "triangle.txt" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" " ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            first = a[i - 1][j] if j != len(a[i - 1] ) else 0
            second = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(first , second )
    return max(a[-1] )
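# The loop above implements the classic maximum path sum recurrence:
#   best[i][j] = triangle[i][j] + max(best[i-1][j-1], best[i-1][j])
# so the answer is simply the maximum of the last (fully accumulated) row.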
if __name__ == "__main__":
print(solution())
| 554
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
a_ : str = '''luke'''
def __init__(self , UpperCAmelCase=5_0_2_6_7 , UpperCAmelCase=5_0_0_0_0_0 , UpperCAmelCase=7_6_8 , UpperCAmelCase=2_5_6 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=2 , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase)
__UpperCAmelCase =vocab_size
__UpperCAmelCase =entity_vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =entity_emb_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =hidden_act
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =type_vocab_size
__UpperCAmelCase =initializer_range
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =use_entity_aware_attention
__UpperCAmelCase =classifier_dropout
| 132
| 0
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester( unittest.TestCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=30 , __UpperCAmelCase=400 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=1 / 255 , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=True , ):
'''simple docstring'''
__UpperCamelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = num_channels
__UpperCamelCase = min_resolution
__UpperCamelCase = max_resolution
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean
__UpperCamelCase = image_std
__UpperCamelCase = do_pad
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
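# Worked example of the resize rule above (illustrative, using the 640x480 COCO test image and the
# pretrained checkpoint's size {"shortest_edge": 800, "longest_edge": 1333}): since w > h, the
# height becomes 800 and the width becomes int(800 * 640 / 480) = 1066, which matches the
# torch.Size([1, 3, 800, 1066]) asserted in the integration tests further down.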
@require_torch
@require_vision
class DetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
lowercase = DetrImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = DetrImageProcessingTester(self )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_rescale' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'rescale_factor' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_pad' ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
__UpperCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCAmelCase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
__UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
__UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
__UpperCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__UpperCamelCase = json.loads(f.read() )
__UpperCamelCase = {'image_id': 3_9769, 'annotations': target}
# encode them
__UpperCamelCase = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__UpperCamelCase = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors='pt' )
# verify pixel values
__UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
__UpperCamelCase = torch.tensor([5_8_8_7.9_6_0_0, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCAmelCase ) )
# verify boxes
__UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
__UpperCamelCase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCAmelCase ) )
# verify is_crowd
__UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCAmelCase ) )
# verify class_labels
__UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCAmelCase ) )
# verify orig_size
__UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCAmelCase ) )
# verify size
__UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCAmelCase ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__UpperCamelCase = json.loads(f.read() )
__UpperCamelCase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
__UpperCamelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__UpperCamelCase = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__UpperCamelCase = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors='pt' )
# verify pixel values
__UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
__UpperCamelCase = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCAmelCase ) )
# verify boxes
__UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
__UpperCamelCase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCAmelCase ) )
# verify is_crowd
__UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCAmelCase ) )
# verify class_labels
__UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCAmelCase ) )
# verify masks
__UpperCamelCase = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __UpperCAmelCase )
# verify orig_size
__UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCAmelCase ) )
# verify size
__UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCAmelCase ) )
| 704
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( x ):
    """Calculate the entropy of the softmax distribution defined by pre-softmax logits x."""
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
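# Note: for the softmax distribution p_i = exp(x_i) / sum_j exp(x_j), the entropy
# -sum_i p_i * log(p_i) simplifies to log(sum_j exp(x_j)) - (sum_i x_i * exp(x_i)) / (sum_j exp(x_j)),
# i.e. exactly log(A) - B / A as computed above.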
class DeeBertEncoder( nn.Module ):
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__UpperCamelCase = config.output_attentions
__UpperCamelCase = config.output_hidden_states
__UpperCamelCase = nn.ModuleList([BertLayer(__UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
__UpperCamelCase = nn.ModuleList([BertHighway(__UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
__UpperCamelCase = [-1 for _ in range(config.num_hidden_layers )]
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if (type(__UpperCAmelCase ) is float) or (type(__UpperCAmelCase ) is int):
for i in range(len(self.early_exit_entropy ) ):
__UpperCamelCase = x
else:
__UpperCamelCase = x
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ):
'''simple docstring'''
__UpperCamelCase = ()
__UpperCamelCase = ()
__UpperCamelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__UpperCamelCase = all_hidden_states + (hidden_states,)
__UpperCamelCase = layer_module(
__UpperCAmelCase , __UpperCAmelCase , head_mask[i] , __UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = layer_outputs[0]
if self.output_attentions:
__UpperCamelCase = all_attentions + (layer_outputs[1],)
__UpperCamelCase = (hidden_states,)
if self.output_hidden_states:
__UpperCamelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
__UpperCamelCase = current_outputs + (all_attentions,)
__UpperCamelCase = self.highway[i](__UpperCAmelCase )
# logits, pooled_output
if not self.training:
__UpperCamelCase = highway_exit[0]
__UpperCamelCase = entropy(__UpperCAmelCase )
__UpperCamelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__UpperCamelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__UpperCamelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__UpperCAmelCase , i + 1 )
else:
__UpperCamelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__UpperCamelCase = all_hidden_states + (hidden_states,)
__UpperCamelCase = (hidden_states,)
if self.output_hidden_states:
__UpperCamelCase = outputs + (all_hidden_states,)
if self.output_attentions:
__UpperCamelCase = outputs + (all_attentions,)
__UpperCamelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
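# Early-exit flow (summary of the loop above): at inference time each layer's highway classifier is
# evaluated; when its prediction entropy drops below that layer's configured threshold, a
# HighwayException carrying the highway logits is raised and caught by the task model, so the
# remaining transformer layers are skipped entirely.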
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). " , BERT_START_DOCSTRING , )
class DeeBertModel( BertPreTrainedModel ):
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase )
__UpperCamelCase = config
__UpperCamelCase = BertEmbeddings(__UpperCAmelCase )
__UpperCamelCase = DeeBertEncoder(__UpperCAmelCase )
__UpperCamelCase = BertPooler(__UpperCAmelCase )
self.init_weights()
def UpperCAmelCase ( self ):
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.embeddings.word_embeddings
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = value
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(__UpperCAmelCase )
@add_start_docstrings_to_model_forward(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ):
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__UpperCamelCase = input_ids.size()
elif inputs_embeds is not None:
__UpperCamelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__UpperCamelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__UpperCamelCase = torch.ones(__UpperCAmelCase , device=__UpperCAmelCase )
if encoder_attention_mask is None:
__UpperCamelCase = torch.ones(__UpperCAmelCase , device=__UpperCAmelCase )
if token_type_ids is None:
__UpperCamelCase = torch.zeros(__UpperCAmelCase , dtype=torch.long , device=__UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__UpperCamelCase = self.get_extended_attention_mask(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__UpperCamelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__UpperCamelCase = encoder_attention_mask[:, None, None, :]
__UpperCamelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__UpperCamelCase = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__UpperCamelCase = self.get_head_mask(__UpperCAmelCase , self.config.num_hidden_layers )
__UpperCamelCase = self.embeddings(
input_ids=__UpperCAmelCase , position_ids=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , inputs_embeds=__UpperCAmelCase )
__UpperCamelCase = self.encoder(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
__UpperCamelCase = encoder_outputs[0]
__UpperCamelCase = self.pooler(__UpperCAmelCase )
__UpperCamelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException( Exception ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = message
__UpperCamelCase = exit_layer # start from 1!
class BertHighway( nn.Module ):
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__UpperCamelCase = BertPooler(__UpperCAmelCase )
__UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase = nn.Linear(config.hidden_size , config.num_labels )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = encoder_outputs[0]
__UpperCamelCase = self.pooler(__UpperCAmelCase )
# "return" pooler_output
# BertModel
__UpperCamelCase = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__UpperCamelCase = bmodel_output[1]
__UpperCamelCase = self.dropout(__UpperCAmelCase )
__UpperCamelCase = self.classifier(__UpperCAmelCase )
return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification( BertPreTrainedModel ):
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase )
__UpperCamelCase = config.num_labels
__UpperCamelCase = config.num_hidden_layers
__UpperCamelCase = DeeBertModel(__UpperCAmelCase )
__UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=-1 , __UpperCAmelCase=False , ):
'''simple docstring'''
__UpperCamelCase = self.num_layers
try:
__UpperCamelCase = self.bert(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , position_ids=__UpperCAmelCase , head_mask=__UpperCAmelCase , inputs_embeds=__UpperCAmelCase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__UpperCamelCase = outputs[1]
__UpperCamelCase = self.dropout(__UpperCAmelCase )
__UpperCamelCase = self.classifier(__UpperCAmelCase )
__UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase = e.message
__UpperCamelCase = e.exit_layer
__UpperCamelCase = outputs[0]
if not self.training:
__UpperCamelCase = entropy(__UpperCAmelCase )
__UpperCamelCase = []
__UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__UpperCamelCase = []
for highway_exit in outputs[-1]:
__UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(__UpperCAmelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__UpperCAmelCase )
if train_highway:
__UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase = (loss,) + outputs
if not self.training:
__UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 293
| 0
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor( ProcessorMixin ):
'''simple docstring'''
UpperCAmelCase__ = ['''image_processor''', '''tokenizer''']
UpperCAmelCase__ = '''FlavaImageProcessor'''
UpperCAmelCase__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : List[str] , lowercase__ : Optional[Any]=None , lowercase__ : Optional[int]=None , **lowercase__ : Dict ) ->Any:
'''simple docstring'''
_UpperCamelCase : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase__ , )
_UpperCamelCase : List[str] = kwargs.pop("feature_extractor" )
_UpperCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase__ , lowercase__ )
_UpperCamelCase : Optional[Any] = self.image_processor
def __call__( self : List[Any] , lowercase__ : Optional[ImageInput] = None , lowercase__ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , lowercase__ : bool = True , lowercase__ : Union[bool, str, PaddingStrategy] = False , lowercase__ : Union[bool, str, TruncationStrategy] = False , lowercase__ : Optional[int] = None , lowercase__ : int = 0 , lowercase__ : Optional[int] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[bool] = None , lowercase__ : bool = False , lowercase__ : bool = False , lowercase__ : bool = False , lowercase__ : bool = False , lowercase__ : bool = True , lowercase__ : Optional[Union[str, TensorType]] = None , **lowercase__ : Tuple , ) ->str:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_UpperCamelCase : Union[str, Any] = self.tokenizer(
text=lowercase__ , add_special_tokens=lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , stride=lowercase__ , pad_to_multiple_of=lowercase__ , return_token_type_ids=lowercase__ , return_attention_mask=lowercase__ , return_overflowing_tokens=lowercase__ , return_special_tokens_mask=lowercase__ , return_offsets_mapping=lowercase__ , return_length=lowercase__ , verbose=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
if images is not None:
_UpperCamelCase : int = self.image_processor(
lowercase__ , return_image_mask=lowercase__ , return_codebook_pixels=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
if text is not None and images is not None:
encoding.update(lowercase__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ )
def snake_case__ ( self : int , *lowercase__ : List[Any] , **lowercase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def snake_case__ ( self : Dict , *lowercase__ : Optional[int] , **lowercase__ : Any ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
def snake_case__ ( self : str ) ->int:
'''simple docstring'''
_UpperCamelCase : Dict = self.tokenizer.model_input_names
_UpperCamelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def snake_case__ ( self : str ) ->Any:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase__ , )
return self.image_processor_class
@property
def snake_case__ ( self : List[str] ) ->Optional[int]:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase__ , )
return self.image_processor
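# Minimal usage sketch (checkpoint name and inputs are illustrative, not taken from this file):
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")
# Text-only or image-only calls are also allowed; passing neither raises a ValueError.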
| 435
|
'''simple docstring'''
import numpy as np
def power_iteration( input_matrix: np.ndarray , vector: np.ndarray , error_tol: float = 1E-12 , max_iterations: int = 100 , ) -> tuple[float, np.ndarray]:
    '''simple docstring'''
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1E12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
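# Minimal usage sketch (matrix and start vector are illustrative):
#   eigen_value, eigen_vector = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
#   # eigen_value converges towards the dominant eigenvalue of the matrix (3.0 here).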
def test_power_iteration() -> None:
'''simple docstring'''
_UpperCamelCase : Any = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]] )
_UpperCamelCase : str = np.array([4_1, 4, 2_0] )
_UpperCamelCase : Union[str, Any] = real_input_matrix.astype(np.complexaaa )
_UpperCamelCase : List[str] = np.triu(1j * complex_input_matrix ,1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_UpperCamelCase : Optional[int] = np.array([4_1, 4, 2_0] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_UpperCamelCase : Tuple = real_input_matrix
_UpperCamelCase : List[Any] = real_vector
elif problem_type == "complex":
_UpperCamelCase : Optional[int] = complex_input_matrix
_UpperCamelCase : Any = complex_vector
# Our implementation.
_UpperCamelCase , _UpperCamelCase : Any = power_iteration(UpperCAmelCase ,UpperCAmelCase )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_UpperCamelCase , _UpperCamelCase : Any = np.linalg.eigh(UpperCAmelCase )
# Last eigenvalue is the maximum one.
_UpperCamelCase : Union[str, Any] = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_UpperCamelCase : Optional[int] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(UpperCAmelCase ) - np.abs(UpperCAmelCase ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 435
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _UpperCamelCase (unittest.TestCase ):
def __UpperCAmelCase ( self )-> Dict:
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = BlipImageProcessor()
__lowerCAmelCase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
__lowerCAmelCase = BlipProcessor(__UpperCamelCase , __UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , **__UpperCamelCase )-> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase ).tokenizer
def __UpperCAmelCase ( self , **__UpperCamelCase )-> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase ).image_processor
def __UpperCAmelCase ( self )-> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self )-> Optional[int]:
__lowerCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(__UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self )-> Union[str, Any]:
__lowerCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__lowerCAmelCase = self.get_image_processor(do_normalize=__UpperCamelCase , padding_value=1.0 )
__lowerCAmelCase = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCamelCase )
def __UpperCAmelCase ( self )-> int:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__UpperCamelCase , return_tensors="np" )
__lowerCAmelCase = processor(images=__UpperCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self )-> Tuple:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__lowerCAmelCase = "lower newer"
__lowerCAmelCase = processor(text=__UpperCamelCase )
__lowerCAmelCase = tokenizer(__UpperCamelCase , return_token_type_ids=__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self )-> Any:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__lowerCAmelCase = "lower newer"
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def __UpperCAmelCase ( self )-> Any:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase = processor.batch_decode(__UpperCamelCase )
__lowerCAmelCase = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __UpperCAmelCase ( self )-> Any:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__lowerCAmelCase = "lower newer"
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__UpperCamelCase , images=__UpperCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 290
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[str] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _UpperCamelCase (PretrainedConfig ):
snake_case_ = """biogpt"""
def __init__( self , __UpperCamelCase=4_2_3_8_4 , __UpperCamelCase=1_0_2_4 , __UpperCamelCase=2_4 , __UpperCamelCase=1_6 , __UpperCamelCase=4_0_9_6 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=1_0_2_4 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1e-12 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=1 , __UpperCamelCase=0 , __UpperCamelCase=2 , **__UpperCamelCase , )-> Optional[Any]:
__lowerCAmelCase = vocab_size
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = scale_embedding
__lowerCAmelCase = use_cache
__lowerCAmelCase = layerdrop
__lowerCAmelCase = activation_dropout
super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
| 290
| 1
|
def different_signs( num_a : int , num_b : int ) -> bool:
    """Return True if the two integers have opposite signs (i.e. their XOR is negative)."""
    return num_a ^ num_b < 0
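# Quick sanity checks (illustrative): in two's complement the sign lives in the top bit, so the
# XOR of two integers is negative exactly when their signs differ.
#   different_signs(1, -1)  -> True
#   different_signs(1, 1)   -> False
#   different_signs(-4, -4) -> False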
if __name__ == "__main__":
import doctest
doctest.testmod()
| 488
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Tuple = {
"configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[Any] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
snake_case_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 488
| 1
|
NUMBERS_PLUS_LETTER = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def is_spain_national_id( spanish_id : str ) -> bool:
    """simple docstring"""
    if not isinstance(spanish_id , str ):
        msg = F"""Expected string as input, found {type(spanish_id ).__name__}"""
        raise TypeError(msg )
    spanish_id_clean = spanish_id.replace('''-''' , '''''' ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER ) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER )
    return letter == LOOKUP_LETTERS[number % 23]
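# Illustrative check of the checksum rule above: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z",
# so is_spain_national_id("12345678Z") returns True, while any other trailing letter returns False.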
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
a__ = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
a__ = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
a__ = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> Any:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def __lowercase ( self , _a , _a , _a = CHRF.CHAR_ORDER , _a = CHRF.WORD_ORDER , _a = CHRF.BETA , _a = False , _a = False , _a = False , ) -> Union[str, Any]:
_a : int = len(references[0] )
if any(len(_a ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_a : int = [[refs[i] for refs in references] for i in range(_a )]
_a : str = CHRF(_a , _a , _a , _a , _a , _a )
_a : Optional[Any] = sb_chrf.corpus_score(_a , _a )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
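# Reference layout reminder (values are illustrative): with two predictions that each have one reference,
#   predictions = ["pred one", "pred two"]
#   references  = [["ref for pred one"], ["ref for pred two"]]
# the compute step above transposes this into sacrebleu's per-stream format
#   [["ref for pred one", "ref for pred two"]]
# which is why every prediction must come with the same number of references.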
| 578
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class A_ ( PretrainedConfig ):
'''simple docstring'''
_UpperCamelCase : List[Any] = """audio-spectrogram-transformer"""
def __init__( self , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=16 , snake_case=True , snake_case=10 , snake_case=10 , snake_case=1024 , snake_case=128 , **snake_case , ):
super().__init__(**snake_case )
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = patch_size
lowercase = qkv_bias
lowercase = frequency_stride
lowercase = time_stride
lowercase = max_length
lowercase = num_mel_bins
| 84
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
_UpperCamelCase = True
except (ImportError, AttributeError):
_UpperCamelCase = object
def a_ ( *_lowerCAmelCase ,**_lowerCAmelCase ) -> Union[str, Any]:
pass
_UpperCamelCase = False
_UpperCamelCase = logging.get_logger('transformers-cli/serving')
def a_ ( _lowerCAmelCase ) -> Optional[int]:
__lowerCamelCase : List[Any] = pipeline(
task=args.task ,model=args.model if args.model else None ,config=args.config ,tokenizer=args.tokenizer ,device=args.device ,)
return ServeCommand(_lowerCAmelCase ,args.host ,args.port ,args.workers )
class ServeModelInfoResult( BaseModel ):
    """simple docstring"""
    infos: dict
class ServeTokenizeResult( BaseModel ):
    """simple docstring"""
    tokens: List[str]
    tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult( BaseModel ):
    """simple docstring"""
    text: str
class ServeForwardResult( BaseModel ):
    """simple docstring"""
    output: Any
class ServeCommand( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
def _lowercase ( _a : ArgumentParser ) -> Optional[Any]:
__lowerCamelCase : Union[str, Any] = parser.add_parser(
'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' )
serve_parser.add_argument(
'--task' , type=_a , choices=get_supported_tasks() , help='The task to run the pipeline on' , )
serve_parser.add_argument('--host' , type=_a , default='localhost' , help='Interface the server will listen on.' )
serve_parser.add_argument('--port' , type=_a , default=8888 , help='Port the serving will listen to.' )
serve_parser.add_argument('--workers' , type=_a , default=1 , help='Number of http workers' )
serve_parser.add_argument('--model' , type=_a , help='Model\'s name or path to stored model.' )
serve_parser.add_argument('--config' , type=_a , help='Model\'s config name or path to stored model.' )
serve_parser.add_argument('--tokenizer' , type=_a , help='Tokenizer name to use.' )
serve_parser.add_argument(
'--device' , type=_a , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
serve_parser.set_defaults(func=_a )
def __init__( self : int , _a : Pipeline , _a : str , _a : int , _a : int ) -> Optional[int]:
__lowerCamelCase : int = pipeline
__lowerCamelCase : List[Any] = host
__lowerCamelCase : int = port
__lowerCamelCase : Tuple = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'Using serve command requires FastAPI and uvicorn. '
'Please install transformers with [serving]: pip install "transformers[serving]".'
'Or install FastAPI and uvicorn separately.' )
else:
logger.info(f'Serving model over {host}:{port}' )
__lowerCamelCase : Optional[Any] = FastAPI(
routes=[
APIRoute(
'/' , self.model_info , response_model=_a , response_class=_a , methods=['GET'] , ),
APIRoute(
'/tokenize' , self.tokenize , response_model=_a , response_class=_a , methods=['POST'] , ),
APIRoute(
'/detokenize' , self.detokenize , response_model=_a , response_class=_a , methods=['POST'] , ),
APIRoute(
'/forward' , self.forward , response_model=_a , response_class=_a , methods=['POST'] , ),
] , timeout=600 , )
def _lowercase ( self : Optional[Any] ) -> List[str]:
run(self._app , host=self.host , port=self.port , workers=self.workers )
def _lowercase ( self : Tuple ) -> int:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def _lowercase ( self : str , _a : str = Body(_a , embed=_a ) , _a : bool = Body(_a , embed=_a ) ) -> Optional[int]:
try:
__lowerCamelCase : Dict = self._pipeline.tokenizer.tokenize(_a )
if return_ids:
__lowerCamelCase : int = self._pipeline.tokenizer.convert_tokens_to_ids(_a )
return ServeTokenizeResult(tokens=_a , tokens_ids=_a )
else:
return ServeTokenizeResult(tokens=_a )
except Exception as e:
raise HTTPException(status_code=500 , detail={'model': '', 'error': str(_a )} )
def _lowercase ( self : Optional[Any] , _a : List[int] = Body(_a , embed=_a ) , _a : bool = Body(_a , embed=_a ) , _a : bool = Body(_a , embed=_a ) , ) -> List[Any]:
try:
__lowerCamelCase : Dict = self._pipeline.tokenizer.decode(_a , _a , _a )
return ServeDeTokenizeResult(model='' , text=_a )
except Exception as e:
raise HTTPException(status_code=500 , detail={'model': '', 'error': str(_a )} )
async def _lowercase ( self : Dict , _a : Any=Body(_a , embed=_a ) ) -> Optional[int]:
# Check we don't have empty string
if len(_a ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
__lowerCamelCase : Union[str, Any] = self._pipeline(_a )
return ServeForwardResult(output=_a )
except Exception as e:
raise HTTPException(500 , {'error': str(_a )} )
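# Illustrative usage sketch (hypothetical values, not part of the original module): the command is
# normally reached through the transformers CLI with the arguments registered above, e.g.
#   transformers-cli serve --task text-classification --host localhost --port 8888 --workers 1
# which builds the pipeline via the factory function and exposes /, /tokenize, /detokenize and
# /forward over HTTP.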
| 459
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 287
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_bridgetower'''] = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bridgetower'''] = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 287
| 1
|
def reverse_words(input_str: str) -> str:
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
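    # Illustrative check (added example, not part of the original snippet): the word order is
    # reversed, not the characters within each word.
    print(reverse_words("hello world example"))  # -> "example world hello"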
| 21
|
"""simple docstring"""
import math
def solution(n: int = 1_0_0 ) -> int:
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
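    # Quick illustrative sanity check (added example): for n = 10 the result is 3025 - 385 = 2640.
    print(f'''{solution(10) = }''')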
| 586
| 0
|
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ) -> Tuple:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCAmelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCAmelCase ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
__SCREAMING_SNAKE_CASE = []
for i in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps
__SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) )
return torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
class __a ( _snake_case, _snake_case ):
'''simple docstring'''
__UpperCamelCase : int = [e.name for e in KarrasDiffusionSchedulers]
__UpperCamelCase : str = 2
@register_to_config
def __init__( self : int ,lowerCamelCase : int = 1000 ,lowerCamelCase : float = 0.00_085 ,lowerCamelCase : float = 0.012 ,lowerCamelCase : str = "linear" ,lowerCamelCase : Optional[Union[np.ndarray, List[float]]] = None ,lowerCamelCase : str = "epsilon" ,lowerCamelCase : Optional[bool] = False ,lowerCamelCase : Optional[bool] = False ,lowerCamelCase : float = 1.0 ,lowerCamelCase : str = "linspace" ,lowerCamelCase : int = 0 ,):
'''simple docstring'''
if trained_betas is not None:
__SCREAMING_SNAKE_CASE = torch.tensor(lowerCamelCase ,dtype=torch.floataa )
elif beta_schedule == "linear":
__SCREAMING_SNAKE_CASE = torch.linspace(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__SCREAMING_SNAKE_CASE = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,lowerCamelCase ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__SCREAMING_SNAKE_CASE = betas_for_alpha_bar(lowerCamelCase ,alpha_transform_type="""cosine""" )
elif beta_schedule == "exp":
__SCREAMING_SNAKE_CASE = betas_for_alpha_bar(lowerCamelCase ,alpha_transform_type="""exp""" )
else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""" )
__SCREAMING_SNAKE_CASE = 1.0 - self.betas
__SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = use_karras_sigmas
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : int ,lowerCamelCase : Dict=None ):
'''simple docstring'''
if schedule_timesteps is None:
__SCREAMING_SNAKE_CASE = self.timesteps
__SCREAMING_SNAKE_CASE = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__SCREAMING_SNAKE_CASE = 1 if len(lowerCamelCase ) > 1 else 0
else:
__SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(lowerCamelCase ) else timestep
__SCREAMING_SNAKE_CASE = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : Union[float, torch.FloatTensor] ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.index_for_timestep(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.sigmas[step_index]
__SCREAMING_SNAKE_CASE = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCAmelCase__ ( self : int ,lowerCamelCase : int ,lowerCamelCase : Union[str, torch.device] = None ,lowerCamelCase : Optional[int] = None ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = num_inference_steps
__SCREAMING_SNAKE_CASE = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__SCREAMING_SNAKE_CASE = np.linspace(0 ,num_train_timesteps - 1 ,lowerCamelCase ,dtype=lowerCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__SCREAMING_SNAKE_CASE = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__SCREAMING_SNAKE_CASE = (np.arange(0 ,lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__SCREAMING_SNAKE_CASE = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__SCREAMING_SNAKE_CASE = (np.arange(lowerCamelCase ,0 ,-step_ratio )).round().copy().astype(lowerCamelCase )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
__SCREAMING_SNAKE_CASE = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__SCREAMING_SNAKE_CASE = np.log(lowerCamelCase )
__SCREAMING_SNAKE_CASE = np.interp(lowerCamelCase ,np.arange(0 ,len(lowerCamelCase ) ) ,lowerCamelCase )
if self.config.use_karras_sigmas:
__SCREAMING_SNAKE_CASE = self._convert_to_karras(in_sigmas=lowerCamelCase ,num_inference_steps=self.num_inference_steps )
__SCREAMING_SNAKE_CASE = np.array([self._sigma_to_t(lowerCamelCase ,lowerCamelCase ) for sigma in sigmas] )
__SCREAMING_SNAKE_CASE = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
__SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(lowerCamelCase ).startswith("""mps""" ):
# mps does not support float64
__SCREAMING_SNAKE_CASE = timesteps.to(lowerCamelCase ,dtype=torch.floataa )
else:
__SCREAMING_SNAKE_CASE = timesteps.to(device=lowerCamelCase )
# empty dt and derivative
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__SCREAMING_SNAKE_CASE = defaultdict(lowerCamelCase )
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = np.log(lowerCamelCase )
# get distribution
__SCREAMING_SNAKE_CASE = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__SCREAMING_SNAKE_CASE = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
__SCREAMING_SNAKE_CASE = low_idx + 1
__SCREAMING_SNAKE_CASE = log_sigmas[low_idx]
__SCREAMING_SNAKE_CASE = log_sigmas[high_idx]
# interpolate sigmas
__SCREAMING_SNAKE_CASE = (low - log_sigma) / (low - high)
__SCREAMING_SNAKE_CASE = np.clip(lowerCamelCase ,0 ,1 )
# transform interpolation to time range
__SCREAMING_SNAKE_CASE = (1 - w) * low_idx + w * high_idx
__SCREAMING_SNAKE_CASE = t.reshape(sigma.shape )
return t
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = in_sigmas[-1].item()
__SCREAMING_SNAKE_CASE = in_sigmas[0].item()
__SCREAMING_SNAKE_CASE = 7.0 # 7.0 is the value used in the paper
__SCREAMING_SNAKE_CASE = np.linspace(0 ,1 ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = sigma_min ** (1 / rho)
__SCREAMING_SNAKE_CASE = sigma_max ** (1 / rho)
__SCREAMING_SNAKE_CASE = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.dt is None
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Union[torch.FloatTensor, np.ndarray] ,lowerCamelCase : Union[float, torch.FloatTensor] ,lowerCamelCase : Union[torch.FloatTensor, np.ndarray] ,lowerCamelCase : bool = True ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.index_for_timestep(lowerCamelCase )
# advance index counter by 1
__SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(lowerCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__SCREAMING_SNAKE_CASE = self.sigmas[step_index]
__SCREAMING_SNAKE_CASE = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__SCREAMING_SNAKE_CASE = self.sigmas[step_index - 1]
__SCREAMING_SNAKE_CASE = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_next
__SCREAMING_SNAKE_CASE = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_next
__SCREAMING_SNAKE_CASE = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__SCREAMING_SNAKE_CASE = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
__SCREAMING_SNAKE_CASE = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__SCREAMING_SNAKE_CASE = sigma_next - sigma_hat
# store for 2nd order step
__SCREAMING_SNAKE_CASE = derivative
__SCREAMING_SNAKE_CASE = dt
__SCREAMING_SNAKE_CASE = sample
else:
# 2. 2nd order / Heun's method
__SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_next
__SCREAMING_SNAKE_CASE = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__SCREAMING_SNAKE_CASE = self.dt
__SCREAMING_SNAKE_CASE = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : torch.FloatTensor ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase ):
# mps does not support float64
__SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
__SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
__SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device )
__SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device )
__SCREAMING_SNAKE_CASE = [self.index_for_timestep(lowerCamelCase ,lowerCamelCase ) for t in timesteps]
__SCREAMING_SNAKE_CASE = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__SCREAMING_SNAKE_CASE = sigma.unsqueeze(-1 )
__SCREAMING_SNAKE_CASE = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
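# A minimal usage sketch (illustrative only; method names follow the public diffusers scheduler API
# rather than the obfuscated names above, and `unet` / `shape` are hypothetical placeholders):
#   scheduler.set_timesteps(num_inference_steps=25, device="cuda")
#   sample = torch.randn(shape, device="cuda") * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample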
| 705
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13
| 0
|
from collections import defaultdict
class AssignmentUsingBitmask:
    '''simple docstring'''
    def __init__( self , task_performed , total ):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list )  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1
    def count_ways_until( self , mask , task_no ):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask , task_no + 1 )
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways( self , task_performed ):
        # Store the list of persons for each task
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
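    # For the configuration above (5 tasks, persons with task lists [1, 3, 4], [1, 2, 5] and [3, 4])
    # there are 10 valid assignments, so the print above outputs 10.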
| 27
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__A : Optional[int] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE=None ) -> str:
"""simple docstring"""
if subparsers is not None:
_A = subparsers.add_parser('tpu-config' , description=_description )
else:
_A = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
_A = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=_SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=_SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
_A = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=_SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
return parser
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_A = None
# Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
_A = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
_A = defaults.command_file
if not args.command and defaults.commands is not None:
_A = defaults.commands
if not args.tpu_name:
_A = defaults.tpu_name
if not args.tpu_zone:
_A = defaults.tpu_zone
if args.accelerate_version == "dev":
_A = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
_A = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version ) , Version ):
_A = F"accelerate=={args.accelerate_version}"
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
_A = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _SCREAMING_SNAKE_CASE ):
_A = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
_A = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F"pip install {args.accelerate_version}"]
new_cmd += args.command
_A = '; '.join(_SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
_A = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"Running {' '.join(_SCREAMING_SNAKE_CASE )}" )
return
subprocess.run(_SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def __lowerCAmelCase( ) -> Tuple:
"""simple docstring"""
_A = tpu_command_parser()
_A = parser.parse_args()
tpu_command_launcher(_SCREAMING_SNAKE_CASE )
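# Illustrative invocation sketch (hypothetical values): once wired into the accelerate CLI, the
# parser above corresponds to something like
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-b --command "pip list" --debug
# with `--debug` printing the gcloud command instead of running it.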
| 27
| 1
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _A ( ):
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
lowerCAmelCase__ = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , lowerCAmelCase_ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _A ( ):
"""simple docstring"""
assert _test_patching.open is open
lowerCAmelCase__ = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , "open" , lowerCAmelCase_ ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching , "pandas.read_csv" , lowerCAmelCase_ ):
pass
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , "len" , lowerCAmelCase_ ) is None
with patch_submodule(_test_patching , "len" , lowerCAmelCase_ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = "__test_patch_submodule_start_and_stop_mock__"
lowerCAmelCase__ = patch_submodule(_test_patching , "open" , lowerCAmelCase_ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
lowerCAmelCase__ = "__test_patch_submodule_successive_join__"
lowerCAmelCase__ = "__test_patch_submodule_successive_dirname__"
lowerCAmelCase__ = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , lowerCAmelCase_ ):
with patch_submodule(_test_patching , "os.rename" , lowerCAmelCase_ ):
with patch_submodule(_test_patching , "os.path.dirname" , lowerCAmelCase_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , lowerCAmelCase_ ):
with patch_submodule(_test_patching , "os.path.join" , lowerCAmelCase_ ):
with patch_submodule(_test_patching , "os.path.dirname" , lowerCAmelCase_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , lowerCAmelCase_ ):
pass
with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , lowerCAmelCase_ ):
pass
| 720
|
def nand_gate(input_1 : int , input_2 : int ) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate() -> None:
    """simple docstring"""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 125
| 0
|
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ :
def __init__( self : int , _A : List[Any] , _A : Union[str, Any]=13 , _A : str=32 , _A : int=2 , _A : Optional[int]=3 , _A : List[Any]=16 , _A : int=[32, 64, 1_28] , _A : Union[str, Any]=[1, 2, 1] , _A : int=[2, 2, 4] , _A : Optional[int]=2 , _A : Any=2.0 , _A : Optional[int]=True , _A : List[str]=0.0 , _A : int=0.0 , _A : Union[str, Any]=0.1 , _A : List[Any]="gelu" , _A : Dict=False , _A : Optional[int]=True , _A : Dict=0.02 , _A : int=1e-5 , _A : Any=True , _A : Optional[int]=None , _A : Any=True , _A : Optional[Any]=10 , _A : Dict=8 , _A : List[Any]=["stage1", "stage2"] , _A : List[str]=[1, 2] , ) -> Dict:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : Optional[int] = embed_dim
UpperCAmelCase_ : Tuple = hidden_sizes
UpperCAmelCase_ : int = depths
UpperCAmelCase_ : Optional[int] = num_heads
UpperCAmelCase_ : Dict = window_size
UpperCAmelCase_ : Optional[Any] = mlp_ratio
UpperCAmelCase_ : Union[str, Any] = qkv_bias
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : int = drop_path_rate
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : str = use_absolute_embeddings
UpperCAmelCase_ : str = patch_norm
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : Any = scope
UpperCAmelCase_ : Dict = use_labels
UpperCAmelCase_ : Optional[int] = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = encoder_stride
UpperCAmelCase_ : Any = out_features
UpperCAmelCase_ : Optional[int] = out_indices
def A ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Dict = None
if self.use_labels:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def A ( self : int ) -> Any:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A ( self : List[str] , _A : int , _A : Union[str, Any] , _A : List[str] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = FocalNetModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : List[Any] = model(_A )
UpperCAmelCase_ : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A ( self : List[Any] , _A : Optional[Any] , _A : List[str] , _A : Dict ) -> str:
UpperCAmelCase_ : Union[str, Any] = FocalNetBackbone(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(_A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = FocalNetBackbone(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : Optional[int] = model(_A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A ( self : int , _A : Dict , _A : Tuple , _A : Any ) -> str:
UpperCAmelCase_ : Union[str, Any] = FocalNetForMaskedImageModeling(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : Dict = model(_A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : str = FocalNetForMaskedImageModeling(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A ( self : Any , _A : int , _A : List[str] , _A : int ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size
UpperCAmelCase_ : Optional[Any] = FocalNetForImageClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : Any = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Dict = FocalNetForImageClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : str = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self : Any ) -> str:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ : Dict = config_and_inputs
UpperCAmelCase_ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( __A , __A , unittest.TestCase):
a_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
a_ = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def A ( self : Dict ) -> Any:
UpperCAmelCase_ : List[Any] = FocalNetModelTester(self )
UpperCAmelCase_ : Optional[int] = ConfigTester(self , config_class=_A , embed_dim=37 , has_text_modality=_A )
def A ( self : str ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : Optional[Any] ) -> List[Any]:
return
def A ( self : Dict ) -> Tuple:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def A ( self : List[Any] ) -> int:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_A )
def A ( self : Union[str, Any] ) -> int:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_A )
def A ( self : int ) -> Tuple:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def A ( self : List[Any] ) -> Dict:
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def A ( self : List[Any] ) -> Any:
pass
def A ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Dict = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def A ( self : Tuple ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Tuple = model_class(_A )
UpperCAmelCase_ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase_ : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _A )
def A ( self : Union[str, Any] , _A : Union[str, Any] , _A : str , _A : Optional[Any] , _A : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase_ : Optional[Any] = outputs.hidden_states
UpperCAmelCase_ : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_A ) , _A )
# FocalNet has a different seq_length
UpperCAmelCase_ : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCAmelCase_ : str = outputs.reshaped_hidden_states
self.assertEqual(len(_A ) , _A )
UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape
UpperCAmelCase_ : int = (
reshaped_hidden_states[0].view(_A , _A , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A ( self : int ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Tuple = True
self.check_hidden_states_output(_A , _A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Any = True
self.check_hidden_states_output(_A , _A , _A , _A )
def A ( self : Optional[int] ) -> Dict:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = 3
UpperCAmelCase_ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : int = True
self.check_hidden_states_output(_A , _A , _A , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : int = True
self.check_hidden_states_output(_A , _A , _A , (padded_height, padded_width) )
@slow
def A ( self : int ) -> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = FocalNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def A ( self : Dict ) -> int:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = _config_zero_init(_A )
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(config=_A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class snake_case__ ( unittest.TestCase):
@cached_property
def A ( self : List[Any] ) -> Dict:
# TODO update organization
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : int = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(_A )
UpperCAmelCase_ : str = self.default_image_processor
UpperCAmelCase_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCAmelCase_ : Any = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Tuple = model(**_A )
# verify the logits
UpperCAmelCase_ : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase_ : str = torch.tensor([0.2_166, -0.4_368, 0.2_191] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class snake_case__ ( __A , unittest.TestCase):
a_ = (FocalNetBackbone,) if is_torch_available() else ()
a_ = FocalNetConfig
a_ = False
def A ( self : Dict ) -> str:
UpperCAmelCase_ : str = FocalNetModelTester(self )
| 541
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 488
| 0
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features ):
    batch_size = np.inf
    def set_batch_size(feature ) -> None:
        nonlocal batch_size
        if isinstance(feature ,Image ):
            batch_size = min(batch_size ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature ,Audio ):
            batch_size = min(batch_size ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature ,Value ) and feature.dtype == "binary":
            batch_size = min(batch_size ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)
    _visit(features ,set_batch_size )
    return None if batch_size is np.inf else batch_size
class _a ( UpperCamelCase__):
"""simple docstring"""
def __init__( self: Union[str, Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: int , ):
'''simple docstring'''
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
UpperCamelCase__: Any = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
UpperCamelCase__: List[str] = _PACKAGED_DATASETS_MODULES["parquet"][1]
UpperCamelCase__: Optional[int] = Parquet(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , hash=__lowerCamelCase , **__lowerCamelCase , )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
if self.streaming:
UpperCamelCase__: Union[str, Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCamelCase__: List[str] = None
UpperCamelCase__: str = None
UpperCamelCase__: List[str] = None
UpperCamelCase__: Any = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
UpperCamelCase__: Tuple = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
class _a :
"""simple docstring"""
def __init__( self: int , __lowerCamelCase: Dataset , __lowerCamelCase: Union[PathLike, BinaryIO] , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Dict , ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = dataset
UpperCamelCase__: str = path_or_buf
UpperCamelCase__: List[str] = batch_size or get_writer_batch_size(dataset.features )
UpperCamelCase__: Union[str, Any] = parquet_writer_kwargs
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: int = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
UpperCamelCase__: List[Any] = self._write(file_obj=__lowerCamelCase , batch_size=__lowerCamelCase , **self.parquet_writer_kwargs )
else:
UpperCamelCase__: int = self._write(file_obj=self.path_or_buf , batch_size=__lowerCamelCase , **self.parquet_writer_kwargs )
return written
def UpperCAmelCase_ ( self: Optional[int] , __lowerCamelCase: BinaryIO , __lowerCamelCase: int , **__lowerCamelCase: Any ):
'''simple docstring'''
UpperCamelCase__: str = 0
UpperCamelCase__: Any = parquet_writer_kwargs.pop("path_or_buf" , __lowerCamelCase )
UpperCamelCase__: Dict = self.dataset.features.arrow_schema
UpperCamelCase__: Optional[Any] = pq.ParquetWriter(__lowerCamelCase , schema=__lowerCamelCase , **__lowerCamelCase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , __lowerCamelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
UpperCamelCase__: List[Any] = query_table(
table=self.dataset._data , key=slice(__lowerCamelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(__lowerCamelCase )
written += batch.nbytes
writer.close()
return written
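# Minimal usage sketch (illustrative; the public datasets names ParquetDatasetReader /
# ParquetDatasetWriter are assumed here instead of the obfuscated class names above):
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   ParquetDatasetWriter(ds, "out.parquet").write()
#   reloaded = ParquetDatasetReader("out.parquet", split=NamedSplit("train")).read()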
| 221
|
def lowerCAmelCase_ ( ):
for n in range(1 ,1_00_00_00):
yield n * (n + 1) // 2
def lowerCAmelCase_ ( A_):
UpperCamelCase__: int = 1
UpperCamelCase__: Dict = 2
while i * i <= n:
UpperCamelCase__: Any = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def lowerCAmelCase_ ( ):
return next(i for i in triangle_number_generator() if count_divisors(A_) > 5_00)
if __name__ == "__main__":
print(solution())
| 221
| 1
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 282
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
"""simple docstring"""
@property
def __lowerCAmelCase ( self : Any ):
"""simple docstring"""
return True
@register_to_config
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : float = 1_00 , SCREAMING_SNAKE_CASE__ : float = 1.007 , SCREAMING_SNAKE_CASE__ : float = 80 , SCREAMING_SNAKE_CASE__ : float = 0.05 , SCREAMING_SNAKE_CASE__ : float = 50 , ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self : int ):
"""simple docstring"""
return KarrasVeSchedulerState.create()
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple = () ):
"""simple docstring"""
UpperCamelCase = jnp.arange(0 , SCREAMING_SNAKE_CASE__ )[::-1].copy()
UpperCamelCase = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=SCREAMING_SNAKE_CASE__ , schedule=jnp.array(SCREAMING_SNAKE_CASE__ , dtype=jnp.floataa ) , timesteps=SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState , SCREAMING_SNAKE_CASE__ : jnp.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : random.KeyArray , ):
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
UpperCamelCase = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
UpperCamelCase = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCamelCase = random.split(SCREAMING_SNAKE_CASE__ , num=1 )
UpperCamelCase = self.config.s_noise * random.normal(key=SCREAMING_SNAKE_CASE__ , shape=sample.shape )
UpperCamelCase = sigma + gamma * sigma
UpperCamelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState , SCREAMING_SNAKE_CASE__ : jnp.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : jnp.ndarray , SCREAMING_SNAKE_CASE__ : bool = True , ):
"""simple docstring"""
UpperCamelCase = sample_hat + sigma_hat * model_output
UpperCamelCase = (sample_hat - pred_original_sample) / sigma_hat
UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=SCREAMING_SNAKE_CASE__ , derivative=SCREAMING_SNAKE_CASE__ , state=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState , SCREAMING_SNAKE_CASE__ : jnp.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : jnp.ndarray , SCREAMING_SNAKE_CASE__ : jnp.ndarray , SCREAMING_SNAKE_CASE__ : jnp.ndarray , SCREAMING_SNAKE_CASE__ : bool = True , ):
"""simple docstring"""
UpperCamelCase = sample_prev + sigma_prev * model_output
UpperCamelCase = (sample_prev - pred_original_sample) / sigma_prev
UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=SCREAMING_SNAKE_CASE__ , derivative=SCREAMING_SNAKE_CASE__ , state=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
raise NotImplementedError()
| 282
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 228
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 228
| 1
|
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # For longest side M and a + b = s, the number of integer pairs (a, b)
                # with a <= b <= M is min(M, s // 2) - max(1, s - M) + 1.
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F'{solution() = }')
| 50
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger(__name__)
def a_ ( __a ):
A__ = DPTConfig()
if "large" in checkpoint_url:
A__ = 1024
A__ = 4096
A__ = 24
A__ = 16
A__ = [5, 11, 17, 23]
A__ = [256, 512, 1024, 1024]
A__ = (1, 384, 384)
if "ade" in checkpoint_url:
A__ = True
A__ = 150
A__ = '''huggingface/label-files'''
A__ = '''ade20k-id2label.json'''
A__ = json.load(open(cached_download(hf_hub_url(__a , __a , repo_type='''dataset''' ) ) , '''r''' ) )
A__ = {int(__a ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
A__ = [1, 150, 480, 480]
return config, expected_shape
def a_ ( __a ):
A__ = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(__a , __a )
def a_ ( __a ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
A__ = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
A__ = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
A__ = name.replace('''patch_embed''' , '''patch_embeddings''' )
if "pos_embed" in name:
A__ = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
A__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
A__ = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
A__ = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
A__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
A__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name:
A__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
A__ = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
A__ = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
A__ = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
A__ = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
A__ = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
A__ = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
A__ = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
A__ = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
A__ = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
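        # Illustrative mapping (added): "refinenet4" -> "fusion_stage.layers.0" and "refinenet1" -> "fusion_stage.layers.3".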
if "out_conv" in name:
A__ = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
A__ = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
A__ = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
A__ = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
A__ = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
A__ = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
A__ = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
A__ = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
A__ = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
A__ = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
A__ = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
A__ = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
A__ = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
A__ = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
A__ = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
A__ = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
A__ = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
A__ = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
A__ = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
A__ = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
A__ = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
return name
def a_ ( __a , __a ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: config.hidden_size, :]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = in_proj_bias[-config.hidden_size :]
def a_ ( ):
A__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A__ = Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def a_ ( __a , __a , __a , __a ):
A__ , A__ = get_dpt_config(__a )
# load original state_dict from URL
A__ = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(__a )
# rename keys
for key in state_dict.copy().keys():
A__ = state_dict.pop(__a )
A__ = val
# read in qkv matrices
read_in_q_k_v(__a , __a )
# load HuggingFace model
A__ = DPTForSemanticSegmentation(__a ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(__a )
model.load_state_dict(__a )
model.eval()
# Check outputs on an image
A__ = 480 if '''ade''' in checkpoint_url else 384
A__ = DPTImageProcessor(size=__a )
A__ = prepare_img()
A__ = image_processor(__a , return_tensors='''pt''' )
# forward pass
A__ = model(**__a ).logits if '''ade''' in checkpoint_url else model(**__a ).predicted_depth
# Assert logits
A__ = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] )
if "ade" in checkpoint_url:
A__ = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] )
assert outputs.shape == torch.Size(__a )
assert (
torch.allclose(outputs[0, 0, :3, :3] , __a , atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , __a )
)
Path(__a ).mkdir(exist_ok=__a )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__a )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__a )
if push_to_hub:
print('''Pushing model to hub...''' )
model.push_to_hub(
repo_path_or_name=Path(__a , __a ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=__a , )
image_processor.push_to_hub(
repo_path_or_name=Path(__a , __a ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=__a , )
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
__snake_case : Dict = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 571
| 0
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def _A ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]=0 ):
"""simple docstring"""
if name is None:
lowerCAmelCase__ = None
else:
lowerCAmelCase__ = "." * max(0 , spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}"
lowerCAmelCase__ = fmt.format(UpperCAmelCase__ )
# Print and recurse (if needed).
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
if msg is not None:
print(UpperCAmelCase__ )
for k in val.keys():
recursive_print(UpperCAmelCase__ , val[k] , spaces + 2 )
elif isinstance(UpperCAmelCase__ , torch.Tensor ):
print(UpperCAmelCase__ , ":" , val.size() )
else:
print(UpperCAmelCase__ , ":" , UpperCAmelCase__ )
def _A ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str ):
"""simple docstring"""
lowerCAmelCase__ = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowerCAmelCase__ = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowerCAmelCase__ = param.view(*UpperCAmelCase__ )
lowerCAmelCase__ = param.transpose(0 , 2 )
lowerCAmelCase__ = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowerCAmelCase__ = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowerCAmelCase__ = param.view(*UpperCAmelCase__ )
lowerCAmelCase__ = param.transpose(0 , 1 ).contiguous()
lowerCAmelCase__ = param.view(*UpperCAmelCase__ )
return param
def _A ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = {}
# old versions did not store training args
lowerCAmelCase__ = input_state_dict.get("args" , UpperCAmelCase__ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCAmelCase__ = ds_args.padded_vocab_size
lowerCAmelCase__ = ds_args.max_position_embeddings
lowerCAmelCase__ = ds_args.hidden_size
lowerCAmelCase__ = ds_args.num_layers
lowerCAmelCase__ = ds_args.num_attention_heads
lowerCAmelCase__ = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCAmelCase__ = config.n_head
# The hidden_size per head.
lowerCAmelCase__ = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase__ = input_state_dict["checkpoint_version"]
else:
lowerCAmelCase__ = 0.0
# The model.
lowerCAmelCase__ = input_state_dict["model"]
# The language model.
lowerCAmelCase__ = model["language_model"]
# The embeddings.
lowerCAmelCase__ = lm["embedding"]
# The word embeddings.
lowerCAmelCase__ = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
lowerCAmelCase__ = word_embeddings[: config.vocab_size, :]
lowerCAmelCase__ = word_embeddings
# The position embeddings.
lowerCAmelCase__ = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCAmelCase__ = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
lowerCAmelCase__ = pos_embeddings
# The transformer.
lowerCAmelCase__ = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
lowerCAmelCase__ = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
lowerCAmelCase__ = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
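    # Illustrative (added): with the map above, a Megatron parameter named "layers.0.mlp.dense_h_to_4h.weight"
    # is presumably stored under "transformer.h.0.mlp.c_fc.weight" after the renaming and transpose below.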
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCAmelCase__ = layer_re.match(UpperCAmelCase__ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCAmelCase__ = int(m.group(1 ) )
# The name of the operation.
lowerCAmelCase__ = m.group(2 )
# Is it a weight or a bias?
lowerCAmelCase__ = m.group(3 )
# The name of the layer.
lowerCAmelCase__ = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
lowerCAmelCase__ = "ln_1" if op_name.startswith("input" ) else "ln_2"
lowerCAmelCase__ = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowerCAmelCase__ = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase__ = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCAmelCase__ = torch.tensor(-1E4 , dtype=torch.floataa )
lowerCAmelCase__ = masked_bias
lowerCAmelCase__ = fix_query_key_value_ordering(UpperCAmelCase__ , UpperCAmelCase__ , 3 , UpperCAmelCase__ , UpperCAmelCase__ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCAmelCase__ = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCAmelCase__ = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCAmelCase__ = fix_query_key_value_ordering(UpperCAmelCase__ , UpperCAmelCase__ , 3 , UpperCAmelCase__ , UpperCAmelCase__ )
# Store. No change of shape.
lowerCAmelCase__ = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCAmelCase__ = megatron_to_transformers[op_name]
lowerCAmelCase__ = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCAmelCase__ = megatron_to_transformers[op_name]
lowerCAmelCase__ = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCAmelCase__ = transformer["final_layernorm.weight"]
lowerCAmelCase__ = transformer["final_layernorm.bias"]
# For LM head, transformers' wants the matrix to weight embeddings.
lowerCAmelCase__ = word_embeddings
# It should be done!
return output_state_dict
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true" )
parser.add_argument(
"path_to_checkpoint" , type=UpperCAmelCase__ , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=UpperCAmelCase__ , help="An optional config json file describing the pre-trained model." , )
lowerCAmelCase__ = parser.parse_args()
# Extract the basename.
lowerCAmelCase__ = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
lowerCAmelCase__ = torch.load(UpperCAmelCase__ , map_location="cpu" )
else:
lowerCAmelCase__ = torch.load(args.path_to_checkpoint , map_location="cpu" )
lowerCAmelCase__ = input_state_dict.get("args" , UpperCAmelCase__ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase__ = "gelu_fast"
elif ds_args.openai_gelu:
lowerCAmelCase__ = "gelu_new"
else:
lowerCAmelCase__ = "gelu"
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase__ = "gelu_new"
# Spell out all parameters in case the defaults change.
lowerCAmelCase__ = GPTaConfig(
vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=UpperCAmelCase__ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=UpperCAmelCase__ , summary_activation=UpperCAmelCase__ , summary_proj_to_labels=UpperCAmelCase__ , summary_first_dropout=0.1 , scale_attn_weights=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , bos_token_id=5_0256 , eos_token_id=5_0256 , )
else:
lowerCAmelCase__ = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase__ = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
lowerCAmelCase__ = convert_megatron_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(UpperCAmelCase__ , UpperCAmelCase__ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowerCAmelCase__ = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCAmelCase__ = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
lowerCAmelCase__ = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
lowerCAmelCase__ = "gpt2"
lowerCAmelCase__ = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase__ = type(UpperCAmelCase__ ).__name__
lowerCAmelCase__ = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(UpperCAmelCase__ )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(UpperCAmelCase__ )
# Store the state_dict to file.
lowerCAmelCase__ = os.path.join(UpperCAmelCase__ , "pytorch_model.bin" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(UpperCAmelCase__ , UpperCAmelCase__ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 715
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = (PNDMScheduler,)
snake_case__ = (("num_inference_steps", 5_0),)
def a ( self : int , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]:
lowerCAmelCase__ = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str=0 , **SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
lowerCAmelCase__ = dict(self.forward_default_kwargs )
lowerCAmelCase__ = kwargs.pop("num_inference_steps" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.dummy_sample
lowerCAmelCase__ = 0.1 * sample
lowerCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
lowerCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
lowerCAmelCase__ = dummy_past_residuals[:]
lowerCAmelCase__ = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
lowerCAmelCase__ = new_scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCAmelCase__ = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
lowerCAmelCase__ = new_scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a ( self : Dict ) -> Any:
pass
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=0 , **SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
lowerCAmelCase__ = dict(self.forward_default_kwargs )
lowerCAmelCase__ = kwargs.pop("num_inference_steps" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.dummy_sample
lowerCAmelCase__ = 0.1 * sample
lowerCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase__ = dummy_past_residuals[:]
lowerCAmelCase__ = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
lowerCAmelCase__ = new_scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCAmelCase__ = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
lowerCAmelCase__ = new_scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a ( self : List[str] , **SCREAMING_SNAKE_CASE__ : int ) -> List[Any]:
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = 10
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.prk_timesteps ):
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
def a ( self : Optional[int] ) -> List[str]:
lowerCAmelCase__ = dict(self.forward_default_kwargs )
lowerCAmelCase__ = kwargs.pop("num_inference_steps" , SCREAMING_SNAKE_CASE__ )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.dummy_sample
lowerCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE__ , "set_timesteps" ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE__ , "set_timesteps" ):
lowerCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowerCAmelCase__ = dummy_past_residuals[:]
lowerCAmelCase__ = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
lowerCAmelCase__ = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , 1 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowerCAmelCase__ = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
lowerCAmelCase__ = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , 1 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def a ( self : Tuple ) -> int:
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple ) -> List[str]:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
lowerCAmelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
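        # Note (added): with steps_offset=1 every timestep is shifted up by one, which is why the
        # expected schedule above ends at 1 rather than 0.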
def a ( self : List[str] ) -> Union[str, Any]:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ )
def a ( self : Union[str, Any] ) -> Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ )
def a ( self : Any ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def a ( self : str ) -> Union[str, Any]:
for t in [1, 5, 10]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ )
def a ( self : List[str] ) -> Union[str, Any]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE__ )
def a ( self : List[str] ) -> List[str]:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
lowerCAmelCase__ = 27
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ = self.dummy_sample
lowerCAmelCase__ = 0.1 * sample
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
lowerCAmelCase__ = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
def a ( self : Union[str, Any] ) -> Optional[Any]:
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def a ( self : Any ) -> Tuple:
lowerCAmelCase__ = self.full_loop()
lowerCAmelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 198.1_318 ) < 1e-2
assert abs(result_mean.item() - 0.2_580 ) < 1e-3
def a ( self : int ) -> Dict:
lowerCAmelCase__ = self.full_loop(prediction_type="v_prediction" )
lowerCAmelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 67.3_986 ) < 1e-2
assert abs(result_mean.item() - 0.0_878 ) < 1e-3
def a ( self : Any ) -> Tuple:
# We specify different beta, so that the first alpha is 0.99
lowerCAmelCase__ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
lowerCAmelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 230.0_399 ) < 1e-2
assert abs(result_mean.item() - 0.2_995 ) < 1e-3
def a ( self : int ) -> List[Any]:
# We specify different beta, so that the first alpha is 0.99
lowerCAmelCase__ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
lowerCAmelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 186.9_482 ) < 1e-2
assert abs(result_mean.item() - 0.2_434 ) < 1e-3
| 125
| 0
|
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 463
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args) -> "DownloadCommand":
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model, cache, force, trust_remote_code):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
| 168
| 0
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
snake_case__ : Tuple = logging.get_logger(__name__)
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self , **_UpperCAmelCase ) -> Dict:
requires_backends(self , ['bs4'] )
super().__init__(**_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = []
UpperCamelCase_ = []
UpperCamelCase_ = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCamelCase_ = parent.find_all(child.name , recursive=_UpperCAmelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_UpperCAmelCase ) else next(i for i, s in enumerate(_UpperCAmelCase , 1 ) if s is child ) )
UpperCamelCase_ = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Any:
UpperCamelCase_ = BeautifulSoup(_UpperCAmelCase , 'html.parser' )
UpperCamelCase_ = []
UpperCamelCase_ = []
UpperCamelCase_ = []
for element in html_code.descendants:
if type(_UpperCAmelCase ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCamelCase_ = html.unescape(_UpperCAmelCase ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = self.xpath_soup(_UpperCAmelCase )
stringaxtag_seq.append(_UpperCAmelCase )
stringaxsubs_seq.append(_UpperCAmelCase )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError('Number of doc strings and xtags does not correspond' )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError('Number of doc strings and xsubs does not correspond' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = ''
for tagname, subs in zip(_UpperCAmelCase , _UpperCAmelCase ):
xpath += f"""/{tagname}"""
if subs != 0:
xpath += f"""[{subs}]"""
return xpath
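    # Illustrative (added): construct_xpath(["html", "body", "div"], [0, 0, 1]) returns "/html/body/div[1]".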
def __call__( self , _UpperCAmelCase ) -> BatchFeature:
UpperCamelCase_ = False
# Check that strings has a valid type
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase_ = True
elif isinstance(_UpperCAmelCase , (list, tuple) ):
if len(_UpperCAmelCase ) == 0 or isinstance(html_strings[0] , _UpperCAmelCase ):
UpperCamelCase_ = True
if not valid_strings:
raise ValueError(
'HTML strings must of type `str`, `List[str]` (batch of examples), '
f"""but is of type {type(_UpperCAmelCase )}.""" )
UpperCamelCase_ = bool(isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(html_strings[0] , _UpperCAmelCase )) )
if not is_batched:
UpperCamelCase_ = [html_strings]
# Get nodes + xpaths
UpperCamelCase_ = []
UpperCamelCase_ = []
for html_string in html_strings:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.get_three_from_single(_UpperCAmelCase )
nodes.append(_UpperCAmelCase )
UpperCamelCase_ = []
for node, tag_list, sub_list in zip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase_ = self.construct_xpath(_UpperCAmelCase , _UpperCAmelCase )
xpath_strings.append(_UpperCAmelCase )
xpaths.append(_UpperCAmelCase )
# return as Dict
UpperCamelCase_ = {'nodes': nodes, 'xpaths': xpaths}
UpperCamelCase_ = BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
return encoded_inputs
| 717
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=64 , _UpperCAmelCase=3 , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=16 , _UpperCAmelCase=[128, 256, 384] , _UpperCAmelCase=[4, 6, 8] , _UpperCAmelCase=[2, 3, 4] , _UpperCAmelCase=[16, 16, 16] , _UpperCAmelCase=0 , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=2 , ) -> int:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = kernel_size
UpperCamelCase_ = stride
UpperCamelCase_ = padding
UpperCamelCase_ = hidden_sizes
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = depths
UpperCamelCase_ = key_dim
UpperCamelCase_ = drop_path_rate
UpperCamelCase_ = patch_size
UpperCamelCase_ = attention_ratio
UpperCamelCase_ = mlp_ratio
UpperCamelCase_ = initializer_range
UpperCamelCase_ = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = num_labels
UpperCamelCase_ = initializer_range
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> str:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
UpperCamelCase_ = LevitModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
UpperCamelCase_ = (self.image_size, self.image_size)
UpperCamelCase_ , UpperCamelCase_ = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase_ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCamelCase_ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = LevitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
A_ = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = LevitModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> Any:
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> str:
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def _UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip(reason='Levit does not output attentions' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCamelCase_ = outputs.hidden_states
UpperCamelCase_ = len(self.model_tester.depths ) + 1
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
UpperCamelCase_ = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase_ , UpperCamelCase_ = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase_ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase_ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Tuple:
UpperCamelCase_ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
            {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
            {'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type['title']
                    config.num_labels = problem_type['num_labels']

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type['num_labels'] > 1:
                        inputs['labels'] = inputs['labels'].unsqueeze(1).repeat(1, problem_type['num_labels'])

                    inputs['labels'] = inputs['labels'].to(problem_type['dtype'])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
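# --- Illustrative sketch (not part of the original test file) ---
# The hidden-states test above derives the token count of the first Levit stage from the
# standard convolution output-size formula, applied once per layer of the convolutional
# patch embedding. A self-contained version of that computation; the default kernel_size,
# stride, padding and the 4-layer depth are assumptions that mirror the model tester above.
from math import floor as _floor


def levit_patch_grid_size(image_size: int, kernel_size: int = 3, stride: int = 2, padding: int = 1, num_conv_layers: int = 4) -> int:
    """Apply floor((size + 2 * padding - kernel_size) / stride) + 1 once per conv layer."""
    size = image_size
    for _ in range(num_conv_layers):
        size = _floor((size + 2 * padding - kernel_size) / stride) + 1
    return size


if __name__ == '__main__':
    # e.g. a 224x224 input gives a 14x14 grid, i.e. 14 * 14 = 196 tokens in the first stage
    print(levit_patch_grid_size(224))  # 14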
| 618
| 0
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
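# --- Illustrative sketch (not part of the original tests) ---
# A simplified illustration of why "lower" is expected to tokenize to ["low", "er</w>"] with
# the toy vocabulary above: BPE starts from single characters (with "</w>" glued to the last
# one) and applies the merge rules. Real BPE re-scans for the best-ranked pair after every
# merge; applying each toy rule once, in file order, is enough for this example.
def _toy_bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + '</w>']
    for first, second in merges:
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols


if __name__ == '__main__':
    print(_toy_bpe('lower', [('l', 'o'), ('lo', 'w'), ('e', 'r</w>')]))  # ['low', 'er</w>']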
| 100
|
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
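    # Optional sanity check (not in the original script): with identical universes the
    # skfuzzy calls above should reduce to the elementwise max/min definitions quoted in
    # the comments, so both of these are expected to print True.
    print('union == elementwise max:', np.allclose(union, np.maximum(young, middle_aged)))
    print('intersection == elementwise min:', np.allclose(intersection, np.minimum(young, middle_aged)))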
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 314
| 0
|
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)

DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f'There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts.'
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs['input_ids']:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
            length = end_index - start_index + 1
            assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DPRReaderTokenizer
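# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how the reader tokenizer defined above is typically paired with a
# DPRReader model. The checkpoint name and example passage are assumptions, and running
# this downloads pretrained weights.
if __name__ == '__main__':
    from transformers import DPRReader

    tokenizer = DPRReaderTokenizerFast.from_pretrained('facebook/dpr-reader-single-nq-base')
    model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base')
    encoded_inputs = tokenizer(
        questions='What is love?',
        titles='Haddaway',
        texts='"What Is Love" is a song recorded by the artist Haddaway.',
        return_tensors='pt',
    )
    outputs = model(**encoded_inputs)
    predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
    print(predicted_spans[0].text)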
| 11
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # inserting in descending order at the head keeps the list sorted ascending
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
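# Note on the approach (not part of the original module): merge_lists concatenates both value
# sequences and lets the SortedLinkedList constructor re-sort them, costing O((n + m) log(n + m)).
# Because both inputs are already sorted, the interleaving itself can be done in O(n + m) with
# two pointers; a rough sketch of that step:
def _interleave_sorted(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> list[int]:
    merged: list[int] = []
    iter_one, iter_two = iter(sll_one), iter(sll_two)
    a, b = next(iter_one, None), next(iter_two, None)
    while a is not None or b is not None:
        if b is None or (a is not None and a <= b):
            merged.append(a)
            a = next(iter_one, None)
        else:
            merged.append(b)
            b = next(iter_two, None)
    return merged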
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 11
| 1
|