code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : List[str] = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__snake_case : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 540 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case : Optional[int] = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__snake_case : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 540 | 1 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
UpperCAmelCase = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A_ ( __a : Tuple ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def A_ ( __a : Optional[Any] , __a : int ):
"""simple docstring"""
if args.student_type == "roberta":
a__ = False
elif args.student_type == "gpt2":
a__ = False
def A_ ( __a : Tuple , __a : List[Any] ):
"""simple docstring"""
if args.student_type == "roberta":
a__ = False
def A_ ( ):
"""simple docstring"""
a__ = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=__a , required=__a , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=__a , required=__a , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=__a , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__a , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=__a , required=__a , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=__a , type=__a , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__a , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=__a , required=__a , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=__a , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=__a , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=__a , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=__a , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=__a , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=__a , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.1_5 , type=__a , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=__a , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=__a , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=__a , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=__a , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=__a , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=__a , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=__a , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__a , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.0_5 , type=__a , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=__a , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5e-4 , type=__a , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=__a , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__a , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.0_2 , type=__a , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=__a , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=__a , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=__a , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=__a , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=__a , default=500 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=__a , default=4_000 , help="""Checkpoint interval.""" )
a__ = parser.parse_args()
sanity_checks(__a )
# ARGS #
init_gpu_params(__a )
set_seed(__a )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(__a ) , __a , indent=4 )
git_log(args.dump_path )
a__ , a__ , a__ = MODEL_CLASSES[args.student_type]
a__ , a__ , a__ = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
a__ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
a__ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
a__ = tokenizer.all_special_tokens.index(__a )
a__ = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
a__ = special_tok_ids
a__ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , """rb""" ) as fp:
a__ = pickle.load(__a )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , """rb""" ) as fp:
a__ = pickle.load(__a )
a__ = np.maximum(__a , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
a__ = 0.0 # do not predict special tokens
a__ = torch.from_numpy(__a )
else:
a__ = None
a__ = LmSeqsDataset(params=__a , data=__a )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
a__ = student_config_class.from_pretrained(args.student_config )
a__ = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
a__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__a )
else:
a__ = student_model_class(__a )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info("""Student loaded.""" )
# TEACHER #
a__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__a )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__a , __a )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__a , __a )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
a__ = Distiller(
params=__a , dataset=__a , token_probs=__a , student=__a , teacher=__a )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
| 704 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False, False, False
@dataclass
class __snake_case :
'''simple docstring'''
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : bool = True
UpperCamelCase__ : bool = True
UpperCamelCase__ : Optional[str] = None
# Automatically constructed
UpperCamelCase__ : ClassVar[str] = "dict"
UpperCamelCase__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()})
UpperCamelCase__ : str = field(default="""Audio""" ,init=SCREAMING_SNAKE_CASE ,repr=SCREAMING_SNAKE_CASE)
def __call__( self ):
return self.pa_type
def _a ( self , a_ ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(a_ , a_ ):
return {"bytes": None, "path": value}
elif isinstance(a_ , a_ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
a__ = BytesIO()
sf.write(a_ , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
a__ = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
a__ = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 32_767
a__ = BytesIO(bytes() )
sf.write(a_ , a_ , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def _a ( self , a_ , a_ = None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
a__ , a__ = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
a__ = xsplitext(a_ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
a__ = token_per_repo_id or {}
a__ = path.split("""::""" )[-1]
try:
a__ = string_to_dict(a_ , config.HUB_DATASETS_URL )["""repo_id"""]
a__ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
a__ = None
with xopen(a_ , """rb""" , use_auth_token=a_ ) as f:
a__ , a__ = sf.read(a_ )
else:
a__ , a__ = sf.read(a_ )
a__ = array.T
if self.mono:
a__ = librosa.to_mono(a_ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
a__ = librosa.resample(a_ , orig_sr=a_ , target_sr=self.sampling_rate )
a__ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _a ( self ):
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _a ( self , a_ ):
if pa.types.is_string(storage.type ):
a__ = pa.array([None] * len(a_ ) , type=pa.binary() )
a__ = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
a__ = pa.array([None] * len(a_ ) , type=pa.string() )
a__ = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
a__ = pa.array([Audio().encode_example(a_ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
a__ = storage.field("""bytes""" )
else:
a__ = pa.array([None] * len(a_ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
a__ = storage.field("""path""" )
else:
a__ = pa.array([None] * len(a_ ) , type=pa.string() )
a__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(a_ , self.pa_type )
def _a ( self , a_ ):
@no_op_if_value_is_null
def path_to_bytes(a_ ):
with xopen(a_ , """rb""" ) as f:
a__ = f.read()
return bytes_
a__ = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
a__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
a__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(a_ , self.pa_type )
| 351 | 0 |
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
lowerCAmelCase_ : Optional[int] = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def _lowerCamelCase (__lowerCamelCase : str = "dhaka" , __lowerCamelCase : int = 5 ) -> int:
a__ = min(__lowerCamelCase , 50 ) # Prevent abuse!
a__ = {
"q": query,
"tbm": "isch",
"hl": "en",
"ijn": "0",
}
a__ = requests.get("https://www.google.com/search" , params=__lowerCamelCase , headers=__lowerCamelCase )
a__ = BeautifulSoup(html.text , "html.parser" )
a__ = "".join(
re.findall(r"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) )
a__ = json.dumps(__lowerCamelCase )
a__ = json.loads(__lowerCamelCase )
a__ = re.findall(
r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , __lowerCamelCase , )
if not matched_google_image_data:
return 0
a__ = re.sub(
r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(__lowerCamelCase ) , )
a__ = re.findall(
r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , __lowerCamelCase , )
for index, fixed_full_res_image in enumerate(__lowerCamelCase ):
if index >= max_images:
return index
a__ = bytes(__lowerCamelCase , "ascii" ).decode(
"unicode-escape" )
a__ = bytes(__lowerCamelCase , "ascii" ).decode(
"unicode-escape" )
a__ = urllib.request.build_opener()
a__ = [
(
"User-Agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
)
]
urllib.request.install_opener(__lowerCamelCase )
a__ = f'''query_{query.replace(" " , "_" )}'''
if not os.path.exists(__lowerCamelCase ):
os.makedirs(__lowerCamelCase )
urllib.request.urlretrieve( # noqa: S310
__lowerCamelCase , f'''{path_name}/original_size_img_{index}.jpg''' )
return index
if __name__ == "__main__":
try:
lowerCAmelCase_ : int = download_images_from_google_query(sys.argv[1])
print(f"""{image_count} images were downloaded to disk.""")
except IndexError:
print("Please provide a search term.")
raise
| 489 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
def __a ( self : List[str] ):
'''simple docstring'''
a__ = "ZinengTang/tvlt-base"
a__ = tempfile.mkdtemp()
def __a ( self : str , **lowerCamelCase : Tuple ):
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **lowerCamelCase )
def __a ( self : Dict , **lowerCamelCase : Dict ):
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowerCamelCase )
def __a ( self : Optional[int] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __a ( self : List[Any] ):
'''simple docstring'''
a__ = self.get_image_processor()
a__ = self.get_feature_extractor()
a__ = TvltProcessor(image_processor=lowerCamelCase , feature_extractor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
a__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , lowerCamelCase )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def __a ( self : List[str] ):
'''simple docstring'''
a__ = self.get_image_processor()
a__ = self.get_feature_extractor()
a__ = TvltProcessor(image_processor=lowerCamelCase , feature_extractor=lowerCamelCase )
a__ = np.ones([1_2_0_0_0] )
a__ = feature_extractor(lowerCamelCase , return_tensors="np" )
a__ = processor(audio=lowerCamelCase , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __a ( self : List[Any] ):
'''simple docstring'''
a__ = self.get_image_processor()
a__ = self.get_feature_extractor()
a__ = TvltProcessor(image_processor=lowerCamelCase , feature_extractor=lowerCamelCase )
a__ = np.ones([3, 2_2_4, 2_2_4] )
a__ = image_processor(lowerCamelCase , return_tensors="np" )
a__ = processor(images=lowerCamelCase , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __a ( self : str ):
'''simple docstring'''
a__ = self.get_image_processor()
a__ = self.get_feature_extractor()
a__ = TvltProcessor(image_processor=lowerCamelCase , feature_extractor=lowerCamelCase )
a__ = np.ones([1_2_0_0_0] )
a__ = np.ones([3, 2_2_4, 2_2_4] )
a__ = processor(audio=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def __a ( self : int ):
'''simple docstring'''
a__ = self.get_image_processor()
a__ = self.get_feature_extractor()
a__ = TvltProcessor(image_processor=lowerCamelCase , feature_extractor=lowerCamelCase )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 489 | 1 |
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE,**SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE,**SCREAMING_SNAKE_CASE )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_config(SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ).save_pretrained(SCREAMING_SNAKE_CASE )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 494 |
"""simple docstring"""
from itertools import product
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
_UpperCAmelCase = sides_number
_UpperCAmelCase = max_face_number * dice_number
_UpperCAmelCase = [0] * (max_total + 1)
_UpperCAmelCase = 1
_UpperCAmelCase = range(SCREAMING_SNAKE_CASE,max_face_number + 1 )
for dice_numbers in product(SCREAMING_SNAKE_CASE,repeat=SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(SCREAMING_SNAKE_CASE )
totals_frequencies[total] += 1
return totals_frequencies
def __lowerCamelCase ( ) -> float:
"""simple docstring"""
_UpperCAmelCase = total_frequency_distribution(
sides_number=4,dice_number=9 )
_UpperCAmelCase = total_frequency_distribution(
sides_number=6,dice_number=6 )
_UpperCAmelCase = 0
_UpperCAmelCase = 9
_UpperCAmelCase = 4 * 9
_UpperCAmelCase = 6
for peter_total in range(SCREAMING_SNAKE_CASE,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_UpperCAmelCase = (4**9) * (6**6)
_UpperCAmelCase = peter_wins_count / total_games_number
_UpperCAmelCase = round(SCREAMING_SNAKE_CASE,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
| 494 | 1 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class A__( unittest.TestCase ):
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_activation('''swish''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def _a ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_activation('''silu''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_activation('''mish''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def _a ( self : int ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_activation('''gelu''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 482 |
"""simple docstring"""
def _a ( UpperCAmelCase__ ) -> int:
__SCREAMING_SNAKE_CASE = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''' )
__SCREAMING_SNAKE_CASE = hex_num[0] == '''-'''
if is_negative:
__SCREAMING_SNAKE_CASE = hex_num[1:]
try:
__SCREAMING_SNAKE_CASE = int(UpperCAmelCase__ , 16 )
except ValueError:
raise ValueError('''Invalid value was passed to the function''' )
__SCREAMING_SNAKE_CASE = ''''''
while int_num > 0:
__SCREAMING_SNAKE_CASE = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('''-''' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 482 | 1 |
from statistics import mean, stdev
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = 3 ):
lowercase = min(SCREAMING_SNAKE_CASE_ )
lowercase = max(SCREAMING_SNAKE_CASE_ )
# normalize data
return [round((x - x_min) / (x_max - x_min) ,SCREAMING_SNAKE_CASE_ ) for x in data]
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = 3 ):
lowercase = mean(SCREAMING_SNAKE_CASE_ )
lowercase = stdev(SCREAMING_SNAKE_CASE_ )
# standardize data
return [round((x - mu) / (sigma) ,SCREAMING_SNAKE_CASE_ ) for x in data]
| 715 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__SCREAMING_SNAKE_CASE : Tuple =get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
__SCREAMING_SNAKE_CASE : Union[str, Any] =get_tests_dir('''fixtures/vocab.json''')
__SCREAMING_SNAKE_CASE : Union[str, Any] =get_tests_dir('''fixtures''')
class A_ ( unittest.TestCase ):
_A :List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = 0
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaConfig()
lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaFeatureExtractor()
lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowercase = WavaVecaProcessor(snake_case__ , snake_case__ )
# save in new folder
processor.save_pretrained(snake_case__ )
# drop `processor_class` in tokenizer
with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f:
lowercase = json.load(snake_case__ )
config_dict.pop("""processor_class""" )
with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
f.write(json.dumps(snake_case__ ) )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaFeatureExtractor()
lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowercase = WavaVecaProcessor(snake_case__ , snake_case__ )
# save in new folder
processor.save_pretrained(snake_case__ )
# drop `processor_class` in feature extractor
with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f:
lowercase = json.load(snake_case__ )
config_dict.pop("""processor_class""" )
with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
f.write(json.dumps(snake_case__ ) )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(snake_case__ )
# copy relevant files
copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
f.write("""{}""" )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case__ ):
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
lowercase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
lowercase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
lowercase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoFeatureExtractor.register(snake_case__ , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoProcessor.register(snake_case__ , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoProcessor.register(snake_case__ , snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = os.path.join(snake_case__ , """vocab.txt""" )
with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowercase = CustomTokenizer(snake_case__ )
lowercase = CustomProcessor(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(snake_case__ )
lowercase = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
class A_ ( __a ):
_A :List[str] = False
class A_ ( __a ):
_A :Dict = False
class A_ ( __a ):
_A :Union[str, Any] = '''AutoFeatureExtractor'''
_A :Tuple = '''AutoTokenizer'''
_A :Optional[Any] = False
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoFeatureExtractor.register(snake_case__ , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoProcessor.register(snake_case__ , snake_case__ )
# If remote code is not set, the default is to use local classes.
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class A_ ( unittest.TestCase ):
_A :Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] ):
lowercase = TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowercase = WavaVecaProcessor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case__ , """test-processor""" ) , push_to_hub=snake_case__ , use_auth_token=self._token )
lowercase = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = WavaVecaProcessor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case__ , """test-processor-org""" ) , push_to_hub=snake_case__ , use_auth_token=self._token , organization="""valid_org""" , )
lowercase = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = os.path.join(snake_case__ , """vocab.txt""" )
with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowercase = CustomTokenizer(snake_case__ )
lowercase = CustomProcessor(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token )
lowercase = Repository(snake_case__ , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(snake_case__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(snake_case__ , """tokenizer_config.json""" ) ) as f:
lowercase = json.load(snake_case__ )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_processing.py""" ) ) )
repo.push_to_hub()
lowercase = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 72 | 0 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : Any ) -> Union[str, Any]: # noqa: E741
lowercase : List[Any] =len(__magic_name__ )
lowercase : List[str] =0
lowercase : List[str] =[0] * n
lowercase : List[Any] =[False] * n
lowercase : List[str] =[False] * n
def dfs(__magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : str ):
if parent == root:
out_edge_count += 1
lowercase : Dict =True
lowercase : int =at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
lowercase : Optional[int] =dfs(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
lowercase : int =min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
lowercase : str =True
# AP found via cycle
if at == low[to]:
lowercase : str =True
else:
lowercase : Optional[Any] =min(low[at] , __magic_name__ )
return out_edge_count
for i in range(__magic_name__ ):
if not visited[i]:
lowercase : Optional[Any] =0
lowercase : List[Any] =dfs(__magic_name__ , __magic_name__ , -1 , __magic_name__ )
lowercase : int =out_edge_count > 1
for x in range(len(__magic_name__ ) ):
if is_art[x] is True:
print(__magic_name__ )
# Adjacency list of graph
UpperCamelCase_ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 92 |
'''simple docstring'''
import datasets
UpperCamelCase_ = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
UpperCamelCase_ = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
UpperCamelCase_ = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
return {"accuracy": simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )}
| 92 | 1 |
'''simple docstring'''
import argparse
import struct
import unittest
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : int = data
# Initialize hash values
UpperCAmelCase_ : List[str] = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
UpperCAmelCase_ : int = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
UpperCAmelCase_ : Tuple = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _UpperCamelCase ( snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : int = b'\x80' + (b'\x00' * (6_3 - (len(snake_case_ ) + 8) % 6_4))
UpperCAmelCase_ : int = struct.pack('>Q' , (len(snake_case_ ) * 8) )
return data + padding + big_endian_integer
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Dict = [
self.preprocessed_data[x : x + 6_4]
for x in range(0 , len(self.preprocessed_data ) , 6_4 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCAmelCase_ : int = list(struct.unpack('>16L' , snake_case_ ) )
# add 48 0-ed integers
words += [0] * 4_8
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = self.hashes
for index in range(0 , 6_4 ):
if index > 1_5:
# modify the zero-ed indexes at the end of the array
UpperCAmelCase_ : Optional[int] = (
self.ror(words[index - 1_5] , 7 )
^ self.ror(words[index - 1_5] , 1_8 )
^ (words[index - 1_5] >> 3)
)
UpperCAmelCase_ : str = (
self.ror(words[index - 2] , 1_7 )
^ self.ror(words[index - 2] , 1_9 )
^ (words[index - 2] >> 1_0)
)
UpperCAmelCase_ : List[str] = (
words[index - 1_6] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
UpperCAmelCase_ : Optional[int] = self.ror(snake_case_ , 6 ) ^ self.ror(snake_case_ , 1_1 ) ^ self.ror(snake_case_ , 2_5 )
UpperCAmelCase_ : Optional[int] = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
UpperCAmelCase_ : Optional[int] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
UpperCAmelCase_ : Any = self.ror(snake_case_ , 2 ) ^ self.ror(snake_case_ , 1_3 ) ^ self.ror(snake_case_ , 2_2 )
UpperCAmelCase_ : Dict = (a & b) ^ (a & c) ^ (b & c)
UpperCAmelCase_ : Dict = (sa + maj) % 0x1_0000_0000
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
UpperCAmelCase_ : Any = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCAmelCase_ : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
UpperCAmelCase_ : Union[str, Any] = ''.join([hex(snake_case_ )[2:].zfill(8 ) for value in self.hashes] )
def _UpperCamelCase ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
return 0xFFFF_FFFF & (value << (3_2 - rotations)) | (value >> rotations)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
import hashlib
UpperCAmelCase_ : List[Any] = bytes('Test String' , 'utf-8' )
self.assertEqual(SHAaaa(snake_case_ ).hash , hashlib.shaaaa(snake_case_ ).hexdigest() )
def _lowerCamelCase ( ):
"""simple docstring"""
import doctest
doctest.testmod()
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
UpperCAmelCase_ : Tuple = parser.parse_args()
UpperCAmelCase_ : str = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
UpperCAmelCase_ : Union[str, Any] = f.read()
else:
UpperCAmelCase_ : str = bytes(lowerCamelCase_ , 'utf-8' )
print(SHAaaa(lowerCamelCase_ ).hash )
if __name__ == "__main__":
main()
| 389 | '''simple docstring'''
def _lowerCamelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ):
"""simple docstring"""
UpperCAmelCase_ : str = [0 for i in range(r + 1 )]
# nc0 = 1
UpperCAmelCase_ : Union[str, Any] = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ , lowerCamelCase_ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 389 | 1 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A (__lowerCamelCase :BertModel , __lowerCamelCase :str , __lowerCamelCase :str ):
_lowerCAmelCase = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
_lowerCAmelCase = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(a_ ):
os.makedirs(a_ )
_lowerCAmelCase = model.state_dict()
def to_tf_var_name(__lowerCamelCase :str ):
for patt, repl in iter(a_ ):
_lowerCAmelCase = name.replace(a_ , a_ )
return f'bert/{name}'
def create_tf_var(__lowerCamelCase :np.ndarray , __lowerCamelCase :str , __lowerCamelCase :tf.Session ):
_lowerCAmelCase = tf.dtypes.as_dtype(tensor.dtype )
_lowerCAmelCase = tf.get_variable(dtype=a_ , shape=tensor.shape , name=a_ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(a_ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_lowerCAmelCase = to_tf_var_name(a_ )
_lowerCAmelCase = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_lowerCAmelCase = torch_tensor.T
_lowerCAmelCase = create_tf_var(tensor=a_ , name=a_ , session=a_ )
tf.keras.backend.set_value(a_ , a_ )
_lowerCAmelCase = session.run(a_ )
print(f'Successfully created {tf_name}: {np.allclose(a_ , a_ )}' )
_lowerCAmelCase = tf.train.Saver(tf.trainable_variables() )
saver.save(a_ , os.path.join(a_ , model_name.replace("""-""" , """_""" ) + """.ckpt""" ) )
def A (__lowerCamelCase :Any=None ):
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=a_ , required=a_ , help="""model name e.g. bert-base-uncased""" )
parser.add_argument(
"""--cache_dir""" , type=a_ , default=a_ , required=a_ , help="""Directory containing pytorch model""" )
parser.add_argument("""--pytorch_model_path""" , type=a_ , required=a_ , help="""/path/to/<pytorch-model-name>.bin""" )
parser.add_argument("""--tf_cache_dir""" , type=a_ , required=a_ , help="""Directory in which to save tensorflow model""" )
_lowerCAmelCase = parser.parse_args(a_ )
_lowerCAmelCase = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=a_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 5 | '''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a = logging.get_logger(__name__)
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : List[Any] = ['''pixel_values''']
def __init__( self : Union[str, Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = PIL.Image.BICUBIC , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ : List[str] , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = size if size is not None else {"height": 2_5_6, "width": 2_5_6}
_UpperCAmelCase : Optional[int] = get_size_dict(lowerCAmelCase__ )
_UpperCAmelCase : int = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
_UpperCAmelCase : Union[str, Any] = get_size_dict(lowerCAmelCase__ , param_name="crop_size" )
_UpperCAmelCase : Optional[int] = do_resize
_UpperCAmelCase : Any = size
_UpperCAmelCase : List[str] = resample
_UpperCAmelCase : Optional[int] = do_center_crop
_UpperCAmelCase : Dict = crop_size
_UpperCAmelCase : Tuple = do_rescale
_UpperCAmelCase : int = rescale_factor
_UpperCAmelCase : Optional[Any] = do_normalize
_UpperCAmelCase : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self : int , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PIL.Image.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase : Any = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
lowerCAmelCase__ , size=(size["height"], size["width"]) , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase : Tuple = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(lowerCAmelCase__ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[int, float] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Union[str, Any] , ) -> int:
"""simple docstring"""
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def preprocess(
    self,
    images: ImageInput,
    do_resize: bool = None,
    size: Dict[str, int] = None,
    resample=None,
    do_center_crop: bool = None,
    crop_size: Dict[str, int] = None,
    do_rescale: bool = None,
    rescale_factor: float = None,
    do_normalize: bool = None,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    data_format: ChannelDimension = ChannelDimension.FIRST,
    **kwargs,
) -> PIL.Image.Image:
    """
    Preprocess an image or batch of images: optional resize, center-crop,
    rescale and normalize, then convert to ``data_format`` and wrap in a
    `BatchFeature`. Any argument left as ``None`` falls back to the value
    configured on the processor instance.
    """
    do_resize = do_resize if do_resize is not None else self.do_resize
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std

    size = size if size is not None else self.size
    size = get_size_dict(size)
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size, param_name="crop_size")

    images = make_list_of_images(images)

    if not valid_images(images):
        raise ValueError(
            "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
            "torch.Tensor, tf.Tensor or jax.ndarray."
        )

    # Parenthesized on purpose: the original `do_resize and size is None or
    # resample is None` raised even when `do_resize` was False.
    if do_resize and (size is None or resample is None):
        raise ValueError("Size and resample must be specified if do_resize is True.")
    if do_center_crop and crop_size is None:
        raise ValueError("Crop size must be specified if do_center_crop is True.")
    if do_rescale and rescale_factor is None:
        raise ValueError("Rescale factor must be specified if do_rescale is True.")
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("Image mean and std must be specified if do_normalize is True.")

    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]

    if do_resize:
        images = [self.resize(image=image, size=size, resample=resample) for image in images]
    if do_center_crop:
        images = [self.center_crop(image=image, size=crop_size) for image in images]
    if do_rescale:
        images = [self.rescale(image=image, scale=rescale_factor) for image in images]
    if do_normalize:
        images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

    images = [to_channel_dimension_format(image, data_format) for image in images]

    data = {"pixel_values": images}
    return BatchFeature(data=data, tensor_type=return_tensors)
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Lazy-import scaffolding: the structure dict is consumed by `_LazyModule` at
# the bottom of the file. The obfuscated version clobbered it by reassigning a
# single name and then referenced the never-defined `_import_structure`.
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    """Deprecated alias kept for backward compatibility; use `VideoMAEImageProcessor`."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit the standard deprecation warning before delegating to the
        # replacement image processor. The obfuscated version inherited from
        # and passed an undefined placeholder instead of a warning category.
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 337 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    """Builds tiny RoBERTa configs and random inputs for the Flax model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        # Random token ids plus optional attention mask / token type ids.
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the common Flax model test-suite over every RoBERTa head."""

    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Load each head from the PyTorch checkpoint and run a 1x1 forward pass.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 65 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """
    Sort ``my_list`` with bucket sort: distribute values into one bucket per
    integer offset from the minimum, sort each bucket, and concatenate.

    >>> bucket_sort([4, 5, 3, 2, 1])
    [1, 2, 3, 4, 5]
    >>> bucket_sort([])
    []
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets = [[] for _ in range(bucket_count)]

    for i in my_list:
        # Each value lands in the bucket for its integer distance from the min.
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
    from doctest import testmod

    # Run any doctest examples, then smoke-test the sorter on fixed inputs.
    # NOTE(review): these asserts reference `bucket_sort`, which the obfuscated
    # definition above does not actually declare under that name — confirm.
    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 441 | 0 |
"""simple docstring"""
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """
    Min-max normalize *data* into [0, 1], rounding each value to *ndigits*.

    >>> normalization([1, 2, 3])
    [0.0, 0.5, 1.0]
    """
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def standardization(data: list, ndigits: int = 3) -> list:
    """
    Z-score standardize *data* (subtract the mean, divide by the sample
    standard deviation), rounding each value to *ndigits*.

    >>> standardization([1, 2, 3])
    [-1.0, 0.0, 1.0]
    """
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
| 719 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    """
    Import-time placeholder used when the `note_seq` backend is missing.
    Every entry point raises a helpful "install note_seq" error instead of an
    opaque ImportError. The obfuscated version used an undefined metaclass.
    """

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 533 | 0 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """PhoBERT tokenizer tests against a tiny hand-written vocab/merges pair."""

    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 469 | import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# Type aliases for readability: raw model features and (nested) model outputs.
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.

# ProteinNet stores coordinates in picometers; PDB output wants angstroms.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation (immutable record of one structure)."""

    # Cartesian coordinates of atoms in angstroms.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> "Protein":
    """
    Parse a ProteinNet-format record (``[PRIMARY]``/``[TERTIARY]``/``[MASK]``
    sections) into a `Protein`. Only N, CA and C backbone atoms are filled in.
    """
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    # Pair each section tag with its following block of lines.
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            # ProteinNet coordinates are in picometers.
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: "Protein", chain_id: int = 0) -> List[str]:
    """
    Build the REMARK/PARENT header lines for one chain of ``prot``.
    PARENT lists the templates for ``chain_id`` (or ``N/A`` when unknown).
    """
    pdb_headers = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # Keep only the parents that belong to the requested chain.
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: "Protein", pdb_str: str) -> str:
    """
    Insert REMARK/PARENT header lines into an existing PDB string, emitting a
    fresh PARENT line at the start and after every chain terminator (``TER``).
    """
    out_pdb_lines = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group parents by their chain index, then lay them out densely
            # from chain 0..max so each chain gets a (possibly "N/A") list.
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: "Protein") -> str:
    """Render ``prot`` as a PDB-format string (ATOM/TER/END records + headers)."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        # Map a residue type index to its 3-letter PDB name ("UNK" if unknown).
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: "Protein") -> np.ndarray:
    """
    Return the standard atom mask for each residue type of ``prot`` — i.e.
    which atoms a residue *should* have, regardless of what was resolved.
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: "FeatureDict",
    result: "ModelOutput",
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> "Protein":
    """
    Assemble a `Protein` from model ``features`` and a prediction ``result``.
    Residue indices are shifted to PDB's 1-based convention; missing B-factors
    default to zeros shaped like the final atom mask.
    """
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 486 | 0 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
# WGS-84 ellipsoid axes (meters) and mean Earth radius used by the formula.
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Great-circle distance in meters between two (latitude, longitude) points,
    using the haversine formula on latitudes corrected for Earth's flattening.
    """
    # Reduce latitudes to the auxiliary sphere to account for flattening.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
    import doctest

    # Exercise any doctest examples when the module is run as a script.
    doctest.testmod()
| 713 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """Builds tiny OpenAI GPT configs and random inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # The last vocab id doubles as the padding token.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        # Exercise the forward pass with progressively fewer optional inputs.
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suites over all OpenAI GPT heads."""

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                # The double-heads model expects multiple-choice shaped inputs.
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        """Greedy generation from the pretrained checkpoint matches a fixed reference."""
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        # do_sample=False -> deterministic greedy decoding, so the ids are stable.
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 542 | 0 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowercase(ExplicitEnum):
    """Which of MGP-STR's three prediction heads a logit tensor comes from."""

    # NOTE(review): the obfuscated original bound all three members to the same
    # name and subclassed an undefined alias; an Enum with duplicate member names
    # raises TypeError at class-creation time. Member names restored to the ones
    # the decoding code in this module actually references.
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


# Backward-compatible alias: the rest of the module refers to this enum as
# ``DecodeType`` (and the name ``lowercase`` is later shadowed by the processor
# class below), so publish it under the referenced name as well.
DecodeType = lowercase
# Decode formats supported by the processor's batch_decode, in fixed order.
# NOTE(review): requires ``DecodeType`` to resolve to the enum defined above --
# in this obfuscated file the enum's binding is damaged; verify.
UpperCAmelCase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowercase(ProcessorMixin):
    r"""
    MGP-STR processor wrapping an image processor and a character-level tokenizer.

    Besides the two managed attributes, two auxiliary tokenizers ("gpt2" BPE and
    "bert-base-uncased" WordPiece) are loaded and used only to decode the model's
    three output heads.

    Args:
        image_processor: the image processor (required; the deprecated
            ``feature_extractor`` kwarg is accepted as an alias).
        tokenizer: the character-level MGP-STR tokenizer (required).
    """

    # NOTE(review): the obfuscated original bound all four class attributes and all
    # four decode methods to single reused names, shadowing each other; the names
    # below are the ones ProcessorMixin and the methods themselves require.
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Back-compat: accept the deprecated `feature_extractor` kwarg.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        self.char_tokenizer = tokenizer
        # Auxiliary tokenizers used only for decoding the BPE / WordPiece heads.
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Prepare images and/or text; returns pixel values, labels, or both."""
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # Both provided: attach the tokenized text as labels.
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        """Decode (char, bpe, wp) logits and keep, per sample, the highest-confidence head.

        Returns a dict with `generated_text`, `scores`, and the per-head decodings.
        """
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        """Greedy-decode one head's logits into strings plus cumulative-prob confidences."""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        # Greedy: take the argmax token at every position; drop the BOS slot.
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            # Truncate at the head-specific EOS marker.
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # Confidence = product of per-step max probabilities up to EOS.
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores

    def char_decode(self, sequences):
        """Decode character-head token ids, stripping the tokenizer's spaces."""
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        """Decode BPE-head token ids with the gpt2 tokenizer."""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """Decode WordPiece-head token ids, stripping the tokenizer's spaces."""
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 535 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCamelCase = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class __lowerCamelCase(PipelineTool):
    """Tool that translates text between any two NLLB-200 languages."""

    # NOTE(review): the obfuscated original bound every class attribute to the same
    # name and every method to ``a`` (each shadowing the previous); the attribute
    # and method names below are the ones PipelineTool and the bodies require.
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    # Plain-English language name -> NLLB code. In this file the module-level dict
    # was bound to the obfuscated name ``UpperCamelCase`` -- verify against the
    # constant defined above.
    lang_to_code = UpperCamelCase
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        """Validate both language names and build tokenized translation inputs."""
        if src_lang not in self.lang_to_code:
            raise ValueError(f'{src_lang} is not a supported language.')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'{tgt_lang} is not a supported language.')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        """Run seq2seq generation on the encoded inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Convert generated token ids back to a plain string."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 61 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__(SequenceFeatureExtractor):
    r"""
    M-CTC-T feature extractor: converts raw speech into log-mel spectrogram frames
    and optionally applies per-utterance mean/variance normalization.

    NOTE(review): the obfuscated original declared every ``__init__`` parameter as
    ``A_`` (a duplicate-argument SyntaxError) and bound every method to the same
    name; parameter, attribute and method names are restored from the values the
    bodies actually reference.

    Args:
        feature_size (`int`, defaults to 80): number of mel bins per frame.
        sampling_rate (`int`, defaults to 16000): expected audio sampling rate (Hz).
        padding_value (`float`, defaults to 0.0): value used to pad short sequences.
        hop_length (`int`, defaults to 10): frame stride in milliseconds.
        win_length (`int`, defaults to 25): analysis window length in milliseconds.
        win_function (`str`, defaults to `"hamming_window"`): window function name.
        frame_signal_scale (`float`, defaults to 32768.0): waveform scale factor
            applied before framing (int16 full scale).
        preemphasis_coeff (`float`, defaults to 0.97): pre-emphasis coefficient.
        mel_floor (`float`, defaults to 1.0): floor for the mel filter banks.
        normalize_means (`bool`, defaults to `True`): subtract per-feature mean.
        normalize_vars (`bool`, defaults to `True`): divide by per-feature std.
        return_attention_mask (`bool`, defaults to `False`): default for __call__.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        # Window / stride expressed in samples rather than milliseconds.
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform) -> np.ndarray:
        """Extract log-mel spectrogram features for a single waveform; returns (frames, feature_size)."""
        if self.win_function == "hamming_window":
            # assumes the symmetric (periodic=False) Hamming window is intended --
            # TODO confirm against the reference implementation.
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        """Mean/variance-normalize one feature matrix over its valid frames, then re-pad."""
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            # Restore the padding value on frames past the true length.
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None) -> list:
        """Normalize a batch of feature matrices, using the mask for true lengths."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize raw speech (single array or batch) into padded model inputs."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            # Only trust the mask for true lengths when padding was actually applied.
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
| 705 |
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
lowerCAmelCase : int = logging.get_logger('transformers.models.speecht5')
lowerCAmelCase : Tuple = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
lowerCAmelCase : List[str] = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
lowerCAmelCase : Dict = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
lowerCAmelCase : Optional[Any] = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
lowerCAmelCase : Any = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
lowerCAmelCase : Optional[int] = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
lowerCAmelCase : List[Any] = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
lowerCAmelCase : str = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
lowerCAmelCase : Dict = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
lowerCAmelCase : int = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
lowerCAmelCase : Union[str, Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : List[Any] = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
lowerCAmelCase : int = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
lowerCAmelCase : Optional[int] = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
lowerCAmelCase : Any = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` (dot path) into the HF model and copy `value` into the named tensor.

    NOTE(review): the obfuscated original named this function ``A_`` with five
    parameters all called ``A``; the call site uses ``set_recursively``, so the
    real name and parameter names are restored.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    # Shape-check against the target tensor before assigning.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}'
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    """Return True if `name` matches any ignore pattern.

    Patterns: a trailing ``.*`` is a prefix match, an embedded ``.*.`` matches a
    prefix/suffix pair, anything else is a substring match. (Name and parameter
    names restored: the obfuscated original was ``A_(A, A)`` while the caller
    uses ``should_ignore``.)
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """Copy every fairseq state-dict tensor into the HF model for the given task.

    NOTE(review): function and parameter names restored (obfuscated original was
    ``A_(A, A, A)``; the caller uses ``recursively_load_weights``). The mapping /
    ignore-key constants referenced here are the module-level dicts above; in this
    file their assignment names are also obfuscation-damaged -- verify.
    """
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f'Unsupported task: {task}')
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored')
            continue
        is_used = False
        if "conv_layers" in name:
            # Feature-encoder conv weights take a dedicated loading path.
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # Substitute the layer index extracted from the source name.
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one feature-encoder conv / layer-norm tensor; unmatched names go to `unused_weights`.

    NOTE(review): names restored -- the obfuscated original was ``A_`` with five
    parameters all called ``A``; the caller uses ``load_conv_layer``.
    """
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        # type 0 -> convolution weights/biases
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2 -> layer norm (only layer 0 when group norm is used)
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None,
):
    """Convert a fairseq SpeechT5 checkpoint to HF format and optionally push to the hub.

    NOTE(review): function and local names restored -- the obfuscated original
    bound every local to ``UpperCamelCase`` (each assignment shadowing the last)
    while the __main__ block calls ``convert_speechta_checkpoint``.
    """
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f'Unknown task name: {task}')
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
        tokenizer.add_tokens(['<ctc_blank>'])
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint['model'], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # NOTE(review): the obfuscated original bound the parser and the parsed args to
    # throwaway names and then read undefined ``parser`` / ``args``; restored.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--task',
        default='s2t',
        type=str,
        help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
    )
    parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 432 | 0 |
import qiskit
def lowerCamelCase__ ( bit0 , bit1 ):
    """Simulate a quantum half adder for two classical input bits.

    Args:
        bit0: first input bit (0 or 1).
        bit1: second input bit (0 or 1).

    Returns:
        The measurement histogram (dict mapping 2-bit result strings to shot
        counts). Classical bit 0 holds the sum (XOR), bit 1 the carry (AND).

    Fixes: the original declared two parameters with the same name (a
    SyntaxError), tested only the first bit for both encodings, and bound the
    backend/circuit/job to throwaway names while reading ``qc_ha``/``job``.
    """
    backend = qiskit.Aer.get_backend("aer_simulator" )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0 )
    if bit1 == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , backend , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    # Demo: add 1 + 1 on the simulator and show the measurement histogram.
    # Fix: previously called the undefined name `half_adder` and printed the
    # undefined name `counts`.
    counts = lowerCamelCase__(1, 1)
    print(F"""Half Adder Output Qubit Counts: {counts}""")
| 62 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """Fast (CPU-sized) unit tests for ``StableDiffusionInpaintPipeline``."""

    # NOTE(review): the three ``__UpperCAmelCase`` base classes are never defined
    # in this file (upstream these are the pipeline tester mixins imported
    # above), every class attribute below rebinds the same name
    # ``SCREAMING_SNAKE_CASE_`` (later bindings shadow earlier ones), and inside
    # the methods results are bound to ``_SCREAMING_SNAKE_CASE`` while later
    # lines read different identifiers (``unet``, ``image``, ``sd_pipe``, ...).
    # This looks like mechanical renaming damage — confirm against upstream.
    SCREAMING_SNAKE_CASE_: Optional[Any] = StableDiffusionInpaintPipeline
    SCREAMING_SNAKE_CASE_: List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    SCREAMING_SNAKE_CASE_: Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    SCREAMING_SNAKE_CASE_: Any = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    SCREAMING_SNAKE_CASE_: Any = frozenset([] )

    def A ( self ) -> Optional[Any]:
        """Build tiny UNet/scheduler/VAE/CLIP components for a fast pipeline."""
        torch.manual_seed(0 )
        # 9 input channels: 4 latent + 4 masked-image latent + 1 mask channel,
        # as required by the inpainting UNet.
        _SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase_ , )
        _SCREAMING_SNAKE_CASE : List[str] = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
        torch.manual_seed(0 )
        _SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
        torch.manual_seed(0 )
        _SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
        _SCREAMING_SNAKE_CASE : Dict = CLIPTextModel(lowerCAmelCase_ )
        _SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # Safety checker / feature extractor deliberately disabled for speed.
        _SCREAMING_SNAKE_CASE : Tuple = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def A ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Optional[int]:
        """Produce a deterministic prompt/image/mask/generator input dict."""
        # NOTE(review): the two parameters share one name (SyntaxError as
        # written) — upstream this is ``(self, device, seed=0)``; confirm.
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        _SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
        _SCREAMING_SNAKE_CASE : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _SCREAMING_SNAKE_CASE : Any = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('RGB' ).resize((6_4, 6_4) )
        # Mask is derived from the init image shifted by a constant offset.
        _SCREAMING_SNAKE_CASE : Optional[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((6_4, 6_4) )
        if str(lowerCAmelCase_ ).startswith('mps' ):
            # MPS does not support device-local generators.
            _SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(lowerCAmelCase_ )
        else:
            _SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
        _SCREAMING_SNAKE_CASE : Optional[Any] = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def A ( self ) -> str:
        """End-to-end smoke test: run the pipeline and pin a 3x3 output slice."""
        _SCREAMING_SNAKE_CASE : List[str] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
        _SCREAMING_SNAKE_CASE : Any = StableDiffusionInpaintPipeline(**lowerCAmelCase_ )
        _SCREAMING_SNAKE_CASE : List[str] = sd_pipe.to(lowerCAmelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        _SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase_ )
        _SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase_ ).images
        _SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        # Golden values recorded from a known-good run of this configuration.
        _SCREAMING_SNAKE_CASE : List[str] = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def A ( self ) -> str:
        """Check batched and single inference match within a loose tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for the SD2 inpainting checkpoint."""

    # NOTE(review): all four methods below share the name ``A`` — as written,
    # only the last definition survives on the class. Upstream these are
    # ``tearDown`` and three distinct ``test_*`` methods; likewise the
    # ``lowerCAmelCase_`` read inside the bodies is upstream ``torch_device`` /
    # ``None`` — confirm against the upstream test file.

    def A ( self ) -> Any:
        """Release VRAM between tests."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A ( self ) -> Optional[int]:
        """fp32 inpainting against a stored golden image (tight tolerance)."""
        _SCREAMING_SNAKE_CASE : List[str] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        _SCREAMING_SNAKE_CASE : Any = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        _SCREAMING_SNAKE_CASE : Dict = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy' )
        _SCREAMING_SNAKE_CASE : Optional[Any] = 'stabilityai/stable-diffusion-2-inpainting'
        _SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
        pipe.to(lowerCAmelCase_ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        pipe.enable_attention_slicing()
        _SCREAMING_SNAKE_CASE : List[str] = 'Face of a yellow cat, high resolution, sitting on a park bench'
        _SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
        _SCREAMING_SNAKE_CASE : List[Any] = pipe(
            prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type='np' , )
        _SCREAMING_SNAKE_CASE : int = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 9e-3

    def A ( self ) -> int:
        """fp16 inpainting against a stored golden image (loose tolerance)."""
        _SCREAMING_SNAKE_CASE : Dict = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        _SCREAMING_SNAKE_CASE : str = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        _SCREAMING_SNAKE_CASE : int = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
        _SCREAMING_SNAKE_CASE : Optional[Any] = 'stabilityai/stable-diffusion-2-inpainting'
        _SCREAMING_SNAKE_CASE : str = StableDiffusionInpaintPipeline.from_pretrained(
            lowerCAmelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCAmelCase_ , )
        pipe.to(lowerCAmelCase_ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        pipe.enable_attention_slicing()
        _SCREAMING_SNAKE_CASE : str = 'Face of a yellow cat, high resolution, sitting on a park bench'
        _SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
        _SCREAMING_SNAKE_CASE : List[Any] = pipe(
            prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type='np' , )
        _SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def A ( self ) -> List[str]:
        """Check peak VRAM stays under budget with slicing + CPU offload."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        _SCREAMING_SNAKE_CASE : str = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        _SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        _SCREAMING_SNAKE_CASE : str = 'stabilityai/stable-diffusion-2-inpainting'
        _SCREAMING_SNAKE_CASE : List[Any] = PNDMScheduler.from_pretrained(lowerCAmelCase_ , subfolder='scheduler' )
        _SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
            lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , torch_dtype=torch.floataa , )
        pipe.to(lowerCAmelCase_ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        _SCREAMING_SNAKE_CASE : str = 'Face of a yellow cat, high resolution, sitting on a park bench'
        _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
        _SCREAMING_SNAKE_CASE : Dict = pipe(
            prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type='np' , )
        _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 1_0**9
| 621 | 0 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
A__: str = logging.get_logger(__name__)
@add_end_docstrings(
    UpperCAmelCase__ , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class A__ ( UpperCAmelCase__ ):
    """Fill-mask pipeline: predicts the token(s) behind the tokenizer's mask
    token and returns the top-k candidates (score, token id, token string,
    filled-in sequence) per input.
    """

    # NOTE(review): all helper methods below share the obfuscated (and
    # name-mangled) name ``__UpperCAmelCase``, so each definition shadows the
    # previous one and the ``self.get_masked_index`` / ``self.preprocess`` /
    # ``self._forward`` / ... calls inside the bodies cannot resolve. Within
    # method bodies, values are bound to ``_a`` while later lines read the
    # original identifiers (``masked_index``, ``numel``, ``probs``, ...).
    # This looks like mechanical renaming damage — confirm every method name
    # and local against the upstream FillMaskPipeline before relying on it.

    def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :GenericTensor ) -> np.ndarray:
        """Return the index/indices of the mask token inside ``input_ids``."""
        if self.framework == "tf":
            _a : str =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            _a : Dict =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=SCREAMING_SNAKE_CASE )
        else:
            raise ValueError("""Unsupported framework""" )
        return masked_index

    def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :GenericTensor ) -> np.ndarray:
        """Raise a PipelineException if ``input_ids`` contains no mask token."""
        _a : int =self.get_masked_index(SCREAMING_SNAKE_CASE )
        _a : Dict =np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                """fill-mask""" , self.model.base_model_prefix , f"No mask_token ({self.tokenizer.mask_token}) found on the input" , )

    def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :GenericTensor ) -> Any:
        """Validate every sample in ``model_inputs`` carries a mask token."""
        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(SCREAMING_SNAKE_CASE )

    def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :str=None , **SCREAMING_SNAKE_CASE :Tuple ) -> Dict[str, GenericTensor]:
        """Tokenize the raw input and validate the mask token is present."""
        if return_tensors is None:
            _a : List[str] =self.framework
        _a : Any =self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE )
        self.ensure_exactly_one_mask_token(SCREAMING_SNAKE_CASE )
        return model_inputs

    def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :Dict ) -> int:
        """Run the model forward pass, keeping the input ids for postprocess."""
        _a : Any =self.model(**SCREAMING_SNAKE_CASE )
        _a : Optional[Any] =model_inputs["""input_ids"""]
        return model_outputs

    def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Dict=5 , SCREAMING_SNAKE_CASE :Optional[Any]=None ) -> Any:
        """Convert logits into top-k (score, token, token_str, sequence) dicts,
        optionally restricted to ``target_ids``.
        """
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            _a : int =target_ids.shape[0]
        _a : Dict =model_outputs["""input_ids"""][0]
        _a : Optional[int] =model_outputs["""logits"""]
        if self.framework == "tf":
            _a : Any =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            _a : str =outputs.numpy()
            _a : Optional[int] =outputs[0, masked_index, :]
            _a : Dict =stable_softmax(SCREAMING_SNAKE_CASE , axis=-1 )
            if target_ids is not None:
                _a : Dict =tf.gather_nd(tf.squeeze(SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
                _a : Optional[int] =tf.expand_dims(SCREAMING_SNAKE_CASE , 0 )
            _a : int =tf.math.top_k(SCREAMING_SNAKE_CASE , k=SCREAMING_SNAKE_CASE )
            _a , _a : List[Any] =topk.values.numpy(), topk.indices.numpy()
        else:
            _a : Optional[Any] =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=SCREAMING_SNAKE_CASE ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            _a : List[Any] =outputs[0, masked_index, :]
            _a : List[Any] =logits.softmax(dim=-1 )
            if target_ids is not None:
                _a : Tuple =probs[..., target_ids]
            _a , _a : Optional[Any] =probs.topk(SCREAMING_SNAKE_CASE )
        _a : Any =[]
        # Single-mask inputs are unwrapped to a flat list of candidate dicts.
        _a : Optional[int] =values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            _a : Tuple =[]
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                _a : Optional[Any] =input_ids.numpy().copy()
                if target_ids is not None:
                    _a : int =target_ids[p].tolist()
                _a : str =p
                # Filter padding out:
                _a : List[Any] =tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                _a : int =self.tokenizer.decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
                _a : Union[str, Any] ={"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
                row.append(SCREAMING_SNAKE_CASE )
            result.append(SCREAMING_SNAKE_CASE )
        if single_mask:
            return result[0]
        return result

    def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[Any]=None ) -> List[str]:
        """Resolve user-supplied target words to vocab ids, tokenizing (with a
        warning) any target that is not already a single vocab entry.
        """
        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            _a : List[Any] =[targets]
        try:
            _a : int =self.tokenizer.get_vocab()
        except Exception:
            # Tokenizers without an accessible vocab fall back to tokenization.
            _a : Optional[Any] ={}
        _a : Union[str, Any] =[]
        for target in targets:
            _a : Union[str, Any] =vocab.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            if id_ is None:
                _a : Dict =self.tokenizer(
                    SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE , max_length=1 , truncation=SCREAMING_SNAKE_CASE , )["""input_ids"""]
                if len(SCREAMING_SNAKE_CASE ) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        """We cannot replace it with anything meaningful, ignoring it""" )
                    continue
                _a : str =input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
            target_ids.append(id_ )
        _a : List[str] =list(set(SCREAMING_SNAKE_CASE ) )
        if len(SCREAMING_SNAKE_CASE ) == 0:
            raise ValueError("""At least one target must be provided when passed.""" )
        _a : Tuple =np.array(SCREAMING_SNAKE_CASE )
        return target_ids

    def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Any=None , SCREAMING_SNAKE_CASE :List[Any]=None ) -> Tuple:
        """Split ``targets``/``top_k`` kwargs into postprocess parameters and
        validate the tokenizer actually defines a mask token.
        """
        _a : str ={}
        if targets is not None:
            _a : Tuple =self.get_target_ids(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            _a : Union[str, Any] =target_ids
        if top_k is not None:
            _a : Optional[int] =top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                """fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
        return {}, {}, postprocess_params

    def __call__( self :str , SCREAMING_SNAKE_CASE :List[str] , *SCREAMING_SNAKE_CASE :Union[str, Any] , **SCREAMING_SNAKE_CASE :Optional[Any] ) -> int:
        """Run the pipeline; single-string inputs are unwrapped from the list."""
        _a : Tuple =super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) == 1:
            return outputs[0]
        return outputs
| 506 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
A__: Dict = logging.get_logger(__name__)
class A__ ( UpperCAmelCase__ ):
    """Image processor: resize / center-crop / rescale / normalize a batch of
    images into model-ready pixel values, plus semantic-segmentation
    post-processing.
    """

    # NOTE(review): the transform methods below all share the mangled name
    # ``__UpperCAmelCase`` (later defs shadow earlier ones, so the
    # ``self.resize`` / ``self.center_crop`` / ... calls in ``preprocess``
    # cannot resolve), and within ``__init__`` the configuration values are
    # bound to the throwaway local ``_a`` instead of ``self.<attr>`` — yet the
    # methods read ``self.do_resize`` etc. Mechanical renaming damage; confirm
    # names against the upstream image processor.
    __UpperCamelCase : Tuple = ["pixel_values"]

    def __init__( self :Optional[Any] , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE :PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Dict[str, int] = None , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE :Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE :Optional[Any] , ) -> None:
        """Store the default transform configuration (256 shortest-edge resize,
        224x224 crop, 1/255 rescale, ImageNet mean/std normalization).
        """
        super().__init__(**SCREAMING_SNAKE_CASE )
        _a : int =size if size is not None else {"""shortest_edge""": 2_5_6}
        _a : int =get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
        _a : Union[str, Any] =crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
        _a : Tuple =get_size_dict(SCREAMING_SNAKE_CASE , param_name="""crop_size""" )
        _a : Tuple =do_resize
        _a : Optional[Any] =size
        _a : Any =resample
        _a : Any =do_center_crop
        _a : Optional[int] =crop_size
        _a : int =do_rescale
        _a : Union[str, Any] =rescale_factor
        _a : List[Any] =do_normalize
        _a : Optional[int] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        _a : int =image_std if image_std is not None else IMAGENET_STANDARD_STD

    def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE :Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE :int , ) -> np.ndarray:
        """Resize so the shortest edge matches ``size["shortest_edge"]``."""
        _a : List[str] =get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        _a : Union[str, Any] =get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=size["""shortest_edge"""] , default_to_square=SCREAMING_SNAKE_CASE )
        return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

    def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE :int , ) -> np.ndarray:
        """Center-crop to ``size["height"] x size["width"]``."""
        _a : str =get_size_dict(SCREAMING_SNAKE_CASE )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

    def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE :List[Any] ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

    def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :Union[float, List[float]] , SCREAMING_SNAKE_CASE :Union[float, List[float]] , SCREAMING_SNAKE_CASE :Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE :Dict , ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

    def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :ImageInput , SCREAMING_SNAKE_CASE :Optional[bool] = None , SCREAMING_SNAKE_CASE :Dict[str, int] = None , SCREAMING_SNAKE_CASE :PILImageResampling = None , SCREAMING_SNAKE_CASE :bool = None , SCREAMING_SNAKE_CASE :Dict[str, int] = None , SCREAMING_SNAKE_CASE :Optional[bool] = None , SCREAMING_SNAKE_CASE :Optional[float] = None , SCREAMING_SNAKE_CASE :Optional[bool] = None , SCREAMING_SNAKE_CASE :Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE :Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE :Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE :Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE :str , ) -> Tuple:
        """Run the full pipeline (resize → crop → rescale → normalize) over a
        batch of images; per-call arguments override the stored defaults.
        """
        _a : Optional[Any] =do_resize if do_resize is not None else self.do_resize
        _a : List[Any] =size if size is not None else self.size
        _a : List[str] =get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
        _a : List[Any] =resample if resample is not None else self.resample
        _a : Optional[Any] =do_center_crop if do_center_crop is not None else self.do_center_crop
        _a : List[Any] =crop_size if crop_size is not None else self.crop_size
        _a : int =get_size_dict(SCREAMING_SNAKE_CASE , param_name="""crop_size""" )
        _a : Optional[Any] =do_rescale if do_rescale is not None else self.do_rescale
        _a : Optional[int] =rescale_factor if rescale_factor is not None else self.rescale_factor
        _a : Optional[Any] =do_normalize if do_normalize is not None else self.do_normalize
        _a : List[Any] =image_mean if image_mean is not None else self.image_mean
        _a : Any =image_std if image_std is not None else self.image_std
        _a : Optional[int] =make_list_of_images(SCREAMING_SNAKE_CASE )
        if not valid_images(SCREAMING_SNAKE_CASE ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        _a : str =[to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
        if do_resize:
            _a : Any =[self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
        if do_center_crop:
            _a : int =[self.center_crop(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
        if do_rescale:
            _a : Optional[int] =[self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
        if do_normalize:
            _a : Optional[int] =[self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images]
        _a : Dict =[to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
        _a : Dict ={"""pixel_values""": images}
        return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )

    def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :List[Tuple] = None ) -> List[Any]:
        """Turn model ``outputs.logits`` into per-image segmentation maps,
        optionally upsampling each map to the corresponding target size.
        """
        _a : Any =outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(SCREAMING_SNAKE_CASE ):
                _a : List[str] =target_sizes.numpy()
            _a : List[str] =[]
            for idx in range(len(SCREAMING_SNAKE_CASE ) ):
                _a : List[str] =torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=SCREAMING_SNAKE_CASE )
                _a : Any =resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(SCREAMING_SNAKE_CASE )
        else:
            _a : Optional[int] =logits.argmax(dim=1 )
            _a : List[str] =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 506 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase ) -> list:
    """Sort ``lowerCAmelCase`` in place using binary insertion sort; return it.

    For each element, a binary search locates the insertion point within the
    already-sorted prefix, then the prefix is shifted one slot right to make
    room. Stable; O(n log n) comparisons but O(n^2) moves.

    Fixes: the original bound every intermediate (``val``, ``low``, ``high``,
    ``mid``) to a single throwaway name and ran the shift loop over
    ``range(collection, collection, -1)``, which is a TypeError.
    """
    for i in range(1, len(lowerCAmelCase)):
        val = lowerCAmelCase[i]
        low, high = 0, i - 1
        # Binary-search the sorted prefix for the leftmost slot whose value
        # exceeds val (equal elements stay left => stable sort).
        while low <= high:
            mid = (low + high) // 2
            if val < lowerCAmelCase[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift [low, i) one position right and drop val into place.
        for j in range(i, low, -1):
            lowerCAmelCase[j] = lowerCAmelCase[j - 1]
        lowerCAmelCase[low] = val
    return lowerCAmelCase
if __name__ == "__main__":
    # Interactive demo: read comma-separated integers and print them sorted.
    # Fix: inputs were bound to a throwaway name (`_A`) while the code read
    # `user_input`/`unsorted`, and the undefined `binary_insertion_sort` was
    # called instead of the function defined above.
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(a__(unsorted))
| 182 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import boilerplate for the SEW model package.
# Fixes: the import-structure dict was overwritten by a bare list in the
# torch-available branch, the final _LazyModule call referenced the undefined
# name `_import_structure`, and the lazy module was never installed in
# sys.modules (it was bound to a throwaway variable instead).
_import_structure = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: register the modeling symbols for lazy resolution.
    _import_structure["""modeling_sew"""] = [
        """SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SEWForCTC""",
        """SEWForSequenceClassification""",
        """SEWModel""",
        """SEWPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports the
    # submodules above only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 182 | 1 |
def _lowerCamelCase ( UpperCAmelCase_ : str, UpperCAmelCase_ : str ) -> Any:
"""simple docstring"""
assert x is not None
assert y is not None
A__ = len(__UpperCamelCase )
A__ = len(__UpperCamelCase )
# declaring the array for storing the dp values
A__ = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1, m + 1 ):
for j in range(1, n + 1 ):
A__ = 1 if x[i - 1] == y[j - 1] else 0
A__ = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match )
A__ = ''''''
A__ = m, n
while i > 0 and j > 0:
A__ = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
A__ = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
    # Demo: compute the LCS of two sample strings and compare against the
    # known answer. Fix: the original assigned everything to one throwaway
    # name, called the undefined `longest_common_subsequence`, and printed
    # the unbound names `ln`/`subseq`.
    a = 'AGGTAB'
    b = 'GXTXAYB'
    expected_ln = 4
    expected_subseq = 'GTAB'
    ln, subseq = _lowerCamelCase(a, b)
    print("""len =""", ln, """, sub-sequence =""", subseq)
    import doctest

    doctest.testmod()
| 711 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow GPU integration tests for ``StableDiffusionKDiffusionPipeline``
    with the k-diffusion samplers (euler, dpmpp_2m + Karras sigmas).
    """

    # NOTE(review): all four methods below share the name ``snake_case__`` —
    # as written, only the last definition survives on the class. Upstream
    # these are ``tearDown`` plus three distinct ``test_*`` methods, and the
    # ``SCREAMING_SNAKE_CASE__`` read inside the bodies is upstream
    # ``torch_device`` / ``True`` — confirm against the upstream test file.

    def snake_case__ ( self ) -> Union[str, Any]:
        """Release VRAM between tests."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case__ ( self ) -> Optional[Any]:
        """SD v1-4 with the `sample_euler` scheduler; pins a 3x3 slice."""
        A__ = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
        A__ = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
        sd_pipe.set_scheduler("sample_euler" )
        A__ = "A painting of a squirrel eating a burger"
        A__ = torch.manual_seed(0 )
        A__ = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
        A__ = output.images
        A__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        # Golden values recorded from a known-good run.
        A__ = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def snake_case__ ( self ) -> Tuple:
        """SD v2-1-base with `sample_euler`; looser tolerance than v1-4."""
        A__ = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        A__ = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
        sd_pipe.set_scheduler("sample_euler" )
        A__ = "A painting of a squirrel eating a burger"
        A__ = torch.manual_seed(0 )
        A__ = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
        A__ = output.images
        A__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        A__ = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1

    def snake_case__ ( self ) -> Optional[Any]:
        """SD v2-1-base with `sample_dpmpp_2m` and Karras sigmas enabled."""
        A__ = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        A__ = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
        sd_pipe.set_scheduler("sample_dpmpp_2m" )
        A__ = "A painting of a squirrel eating a burger"
        A__ = torch.manual_seed(0 )
        A__ = sd_pipe(
            [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , )
        A__ = output.images
        A__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        A__ = np.array(
            [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 562 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Fix: the corrupted source bound both the logger and the archive map to the same name
# (`a__`), so the logger was immediately overwritten. Restore two distinct module constants.
logger = logging.get_logger(__name__)

# Map of pretrained VAN checkpoints to their remote config files.
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class snake_case(PretrainedConfig):
    """Configuration for a Visual Attention Network (VAN) model.

    NOTE(review): the corrupted source inherited from an undefined name (`__lowerCamelCase`)
    and declared all __init__ parameters under one duplicate name (a SyntaxError). Base class
    and parameter names were restored from the imports and the attribute assignments; the
    class itself is presumably `VanConfig` — confirm before renaming.
    """

    # `model_type` is the PretrainedConfig registration key used by AutoConfig.
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],       # HF convention: list defaults are treated as read-only
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 399 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
# Fix: both constants were bound to the same mangled name (`a__`), leaving `logger` and
# `DEFAULT_DEVICE` — which the functions below reference — undefined.
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
):
    """Run `model_name` over `examples`, writing one generated line per input to `out_file`.

    Returns a dict with `n_obs`, `runtime` (seconds) and `seconds_per_sample`.

    Fix: the corrupted source declared every parameter under the duplicate name
    `__lowercase` (a SyntaxError) and bound all locals to `__UpperCamelCase`; the
    signature and bindings were restored from the call site in `run_generate`.
    """
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    # NOTE(review): `AutoModelForSeqaSeqLM` matches this file's (corrupted) import line but
    # looks like a garbled `AutoModelForSeq2SeqLM` — confirm against the module header.
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now() -> str:
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'.

    Fix: the corrupted source named this `lowercase__` while the argparse setup in
    `run_generate` calls it as `datetime_now()`.
    """
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """CLI entry point: parse args, generate model outputs, optionally score them.

    Returns the scores dict ({} when no `--reference_path` is given).

    Fix: restored from corrupted source — the original bound every local to
    `__UpperCamelCase` (leaving `parser`, `args`, `examples`, `scores`, … undefined)
    and was itself named `lowercase__` while the __main__ guard calls `run_generate`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    # T5-style models expect a leading space on each source line.
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        # use a context manager so the metrics file is flushed and closed deterministically
        with open(args.score_path, "w") as f:
            json.dump(scores, f)
    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
| 399 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Fix: the logger was bound to the mangled name `__A`, while the conversion functions
# below reference `logger`.
logger = logging.get_logger(__name__)
def replace_key_with_offset(key: str, offset: int, original_name: str, new_name: str) -> str:
    """Rewrite `<block>.<layer>.<original_name>` inside `key` as `block.<block-offset>.<layer>.<new_name>`.

    The block and layer numbers are read from the two path components immediately before
    the first component of `original_name`.

    Fix: the corrupted source declared all four parameters under one duplicate name
    (a SyntaxError) and was named `lowercase_` while `rename_keys` calls
    `replace_key_with_offset`; names restored from the call sites and body references.
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    return key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}"
    )
def rename_keys(state_dict):
    """Map original PoolFormer checkpoint keys onto the HuggingFace PoolFormer layout.

    Fix: restored from corrupted source where every local was bound to mangled names
    (`__lowerCamelCase`), so `key`, `patch_emb_offset`, `total_embed_found`, … were
    undefined at their use sites.
    """
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download the standard COCO cats image used to sanity-check vision model conversions.

    Fix: the corrupted source named this `lowercase_` while the conversion function
    calls `prepare_img()`.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint to the HF format, verify its logits, and save it.

    Fix: restored from corrupted source (all locals bound to `__lowerCamelCase`, leaving
    `config`, `size`, `crop_pct`, `state_dict`, … undefined); also dropped a dead duplicate
    construction of the image processor / pixel values that was immediately overwritten.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]  # e.g. "poolformer_s12" -> "s12"
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # define image processor (crop percentage varies with checkpoint size)
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # prepare the reference image used to verify the conversion
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict and rename its keys to the HF layout
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__A = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 721 | """simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tensorflow_text_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Fix: the corrupted source rebound one mangled name (`__A`) for every optional-backend
# list — so all but the last were lost — and then passed an undefined `_import_structure`
# to _LazyModule. Restore the standard lazy-import structure: start with the always-available
# submodules and append each backend's exports only when that backend is installed.
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module below is used.
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )

    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
# Slack-style pipe table; tabulate renders rows as `| a | b |` with no rules.
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

# Fix: restored from corrupted source — every binding was collapsed onto one mangled
# name (`lowerCamelCase`), so `failed`, `group_info`, `message`, `client`, … were all
# undefined at their use sites. Names recovered from those use sites.
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

# Collect per-log failure counts from the pytest JSON-lines reports in the CWD.
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                if line.get("outcome", "") == "failed":
                    section_num_failed += 1
                    failed.append([test, duration, log.name.split("_")[0]])
                    total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3_0_0_0:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 1_0
        message = message[: 3_0_0_0 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
| 170 |
def remove_duplicates(key: str) -> str:
    """Drop repeated letters from `key`, keeping first occurrences; spaces are always kept.

    Fix: the corrupted source named the parameter `A` while the body iterates `key`,
    and the function itself is called as `remove_duplicates` by `create_cipher_map`.
    """
    key_no_dups = ""
    for ch in key:
        # precedence: space OR (new alphabetic character)
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict:
    """Build a keyword-cipher substitution map: the deduplicated keyword first, then the
    remaining alphabet shifted so that no letter from the keyword is reused.

    Fix: the corrupted source named the parameter `A` while the body references `key`,
    and bound `alphabet`/`offset`/`cipher_alphabet`/`char` to one mangled name.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict) -> str:
    """Encode `message` (upper-cased) through `cipher_map`; unmapped characters pass through.

    Fix: restored the real name and distinct parameter names from the call site in `main`.
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def decipher(message: str, cipher_map: dict) -> str:
    """Decode `message` by inverting `cipher_map`; unmapped characters pass through.

    Fix: restored the real name and distinct parameter names from the call site in `main`.
    """
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    """Interactive driver: read a message, keyword and mode, then print the (de)ciphered text.

    Fix: restored from corrupted source where all locals were bound to mangled names,
    leaving `option`, `func`, `cipher_map`, … undefined.
    """
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 170 | 1 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output of a DDIM scheduler `step`: the sample for the next timestep and, optionally,
    the predicted denoised sample.

    Fix: the corrupted source inherited from an undefined name and collapsed both fields
    to `lowerCAmelCase__ = 42`; the name is required — `step` below returns
    `DDIMSchedulerOutput(...)`.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_bar function over time.

    Fix: the corrupted source declared all three parameters under one duplicate name
    (a SyntaxError) and was named `SCREAMING_SNAKE_CASE_` while the scheduler calls
    `betas_for_alpha_bar(...)`.

    Args:
        num_diffusion_timesteps: number of betas to produce.
        max_beta: upper clamp on each beta to avoid singularities.
        alpha_transform_type: "cosine" (Nichol & Dhariwal) or "exp".

    Returns:
        A float32 torch tensor of `num_diffusion_timesteps` betas.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """Inverted DDIM scheduler: steps *forward* in noise level (sample -> noisier sample),
    used to invert images into the diffusion latent space.

    Fix: restored from corrupted source — the class name collided with the output dataclass,
    the bases were undefined mangled names, every method was named `A__`, and all method
    parameters shared one duplicate name (a SyntaxError). Signatures recovered from the
    bodies and from the standard scheduler interface; confirm against the original module.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas=None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ) -> int:
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample, timestep=None) -> torch.FloatTensor:
        """Identity for DDIM: the model input needs no timestep-dependent scaling."""
        return sample

    def set_timesteps(self, num_inference_steps, device=None) -> List[Any]:
        """Precompute the (ascending) timestep grid used by `step`."""
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output,
        timestep,
        sample,
        eta=0.0,
        use_clipped_model_output=False,
        variance_noise=None,
        return_dict=True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        """One inverted-DDIM step: map `sample` at `timestep` to the *next* (noisier) timestep."""
        # 1. get the next timestep value (inversion walks forward through the schedule)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self) -> Dict:
        return self.config.num_train_timesteps
| 601 |
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class VerificationMode(enum.Enum):
    """Verification level applied to downloaded files / dataset splits.

    The error message in the checksum check below instructs users to set
    ``verification_mode='no_checks'``, so the member values are the public API.
    """

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
    """Base error for checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was recorded that was not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were never downloaded/recorded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum does not match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    """Check that recorded download checksums match the expected ones.

    Args:
        expected_checksums: mapping url -> expected checksum info, or None to skip.
        recorded_checksums: mapping url -> checksum info actually recorded.
        verification_name: optional label used in log/error messages.

    Raises:
        ExpectedMoreDownloadedFiles: some expected urls were not recorded.
        UnexpectedDownloadedFile: some recorded urls were not expected.
        NonMatchingChecksumError: at least one checksum differs.
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Base error for verification of dataset split sizes."""


class UnexpectedSplits(SplitsVerificationException):
    """Some recorded splits were not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """At least one split's number of examples differs from the expected size."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    """Check that recorded split sizes match the expected split infos.

    Args:
        expected_splits: mapping split name -> split info (with `.num_examples`), or None to skip.
        recorded_splits: mapping split name -> recorded split info.

    Raises:
        ExpectedMoreSplits / UnexpectedSplits: name sets differ.
        NonMatchingSplitsSizesError: `num_examples` differs for some split.
    """
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Return the file size and (optionally) the sha256 checksum of a file.

    Args:
        path: path of the file on disk.
        record_checksum: when False, skip hashing and record ``None`` as checksum.

    Returns:
        ``{"num_bytes": <size in bytes>, "checksum": <hex digest or None>}``
    """
    if record_checksum:
        # Local import: `hashlib.sha256` is the intended hash for dataset checksums.
        from hashlib import sha256

        m = sha256()
        with open(path, "rb") as f:
            # Hash in 1 MiB chunks so arbitrarily large files don't load into memory.
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size: Optional[int]) -> bool:
    """Whether `dataset_size` is small enough to fit in memory.

    Returns False when the size is unknown/zero or when no in-memory limit
    is configured (`config.IN_MEMORY_MAX_SIZE` falsy).
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 601 | 1 |
"""simple docstring"""
def greatest_common_divisor(a: int, b: int) -> int:
    """Return gcd(a, b) using the recursive Euclidean algorithm."""
    # gcd(0, b) == |b|; otherwise recurse on (b mod a, a).
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)
def gcd_by_iterative(x: int, y: int) -> int:
    """Return gcd(x, y) using the iterative Euclidean algorithm."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
def main() -> None:
    """Read two comma-separated integers from stdin and print both GCD results."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        # Malformed input (missing value or non-integer) -> friendly message.
        print("Wrong input")


if __name__ == "__main__":
    main()
| 178 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class _snake_case(BaseImageProcessor):
    r"""
    Image processor for super-resolution models: optionally rescales pixel values and
    symmetric-pads images so that height and width become multiples of ``pad_size``.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Multiply pixel values by `scale` (delegates to the module-level `rescale`)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        """Pad bottom/right with symmetric reflection so both dims are multiples of `size`."""
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Prepare one image or a batch of images for the model.

        Per-call arguments override the instance defaults set in ``__init__``.
        Returns a `BatchFeature` holding the processed ``pixel_values``.
        """
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 12 | 0 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Backtracking search for the n-queens problem.

    `possible_board[row] == col` records a placed queen; completed boards are
    appended to `boards` rendered as lists of strings.
    """
    # The next row to fill is the number of queens placed so far.
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # Vertical collision: the column is already occupied. Diagonal collisions
        # use the two invariants of diagonals:
        #
        #   45º:  y - x = b   or  45:  row - col = b
        #   135º: y + x = b   or       row + col = b
        #
        # If any of these values is already taken, this square is attacked.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Square is safe: recurse with the updated board and collision sets.
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    """Solve the n-queens problem for an n x n board and print every solution."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
# Universal gas constant R in J/(mol*K).
UNIVERSAL_GAS_CONSTANT = 8.3_144_598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed v_rms = sqrt(3RT/M) of a gas molecule.

    Args:
        temperature: absolute temperature in kelvin (must be >= 0).
        molar_mass: molar mass in kg/mol (must be > 0).

    Raises:
        Exception: for a negative temperature or a non-positive molar mass.
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
UpperCamelCase = 300
UpperCamelCase = 28
UpperCamelCase = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''') | 383 | 0 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> activation, with 'same' padding for odd kernel sizes."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,  # keeps spatial size for stride 1
            groups=groups,
            bias=False,  # bias is redundant before BatchNorm
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """Stem of the network: a single strided conv layer that embeds pixel values."""

    def __init__(self, config) -> None:
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        # Validate the channel dimension against the configured number of channels.
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """1x1 conv + BatchNorm used to project the residual when shape/stride changes."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation: global-pool, bottleneck MLP (1x1 convs), sigmoid gate."""

    def __init__(self, in_channels: int, reduced_channels: int) -> None:
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        # Channel-wise gating of the input feature map.
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """RegNet's "X" residual layer: 1x1 -> grouped 3x3 -> 1x1 convs with a shortcut."""

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 1) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # Final projection has no activation: it is applied after the residual add.
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet's "Y" residual layer: an X layer with a Squeeze-and-Excitation block."""

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 1) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # SE block's bottleneck width is in_channels / 4.
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A stage: `depth` stacked X or Y layers; downsampling happens in the first layer."""

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2) -> None:
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    """Sequence of RegNet stages; optionally collects every stage's hidden state."""

    def __init__(self, config) -> None:
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """Handles weight initialization and the pretrained-model loading interface."""

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # He initialization for convs; unit/zero init for normalization layers.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    """Embeddings + encoder + adaptive-average pooler, without a task head."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    """RegNet backbone plus a linear classification head on the pooled features."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once and cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 104 |
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
snake_case_ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    """CLIP vision encoder + transformer mapper that projects pooled image features
    into the conditioning space, with a learned unconditional embedding."""

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        # Add a sequence dimension before running the transformer mapper.
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper(nn.Module):
    """Small stack of transformer blocks mapping CLIP features to conditioning states."""

    def __init__(self, config):
        super().__init__()
        # One block per ~5 hidden layers of the vision backbone.
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
| 212 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class SCREAMING_SNAKE_CASE__(TaskTemplate):
    """Task template for automatic speech recognition (audio -> transcription)."""

    # `task` is serialized even when left at its default value.
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """Return a copy of this template whose input schema uses the dataset's Audio feature."""
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # Frozen dataclass: bypass __setattr__ via __dict__.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the configured dataset columns onto the template's canonical names."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 719 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
lowercase__ : List[str] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 451 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class UpperCamelCase(PretrainedConfig):
    """Configuration for REALM models (embedder/encoder/scorer/reader/retriever).

    Groups three kinds of settings: the common BERT-style encoder configuration,
    the reader head, and the retrieval (searcher) component.
    """

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 425 | """simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """Builds a tiny ConvNext-backboned UperNet config plus matching inputs for the tests below.

    Renamed from the obfuscated ``UpperCamelCase``: ``setUp`` in the test class
    instantiates ``UperNetModelTester(self)``, which grounds this name. The
    ``__init__`` previously stored every value in a throwaway local instead of
    on ``self`` (and had duplicate parameter names), which broke every method.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],  # noqa: B006 — read-only defaults, kept for interface compatibility
        depths=[2, 2, 3, 2],  # noqa: B006
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],  # noqa: B006
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        # The original source repeated this assignment at the end; kept harmless.
        self.num_stages = num_stages

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        """Tiny ConvNext backbone config used inside the UperNet config."""
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        # NOTE(review): use_auxiliary_head / auxiliary_concat_input were obfuscated
        # placeholders; True/False below follow the upstream UperNet test — confirm.
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        """Run the model and check the logits shape (one logit map per label)."""
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for UperNet semantic segmentation.

    The obfuscated version named every test method ``_UpperCAmelCase`` (so the
    methods shadowed one another and unittest never collected any of them) and
    used duplicate parameter names in the inner helper (a SyntaxError). Method
    names below are restored from the upstream UperNet test file; bases are
    grounded by the ModelTesterMixin/PipelineTesterMixin imports at the top of
    the file.
    """

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    # NOTE(review): attribute name follows PipelineTesterMixin's convention — confirm.
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common config-property checks are intentionally skipped for UperNet.
        return

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        # NOTE(review): the method name here is inferred from upstream — confirm.
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Download the ADE20k validation image used by the integration tests and return it as RGB.

    Renamed from the obfuscated ``lowercase__``: the integration tests below call
    ``prepare_img()``. Also fixes ``Image.open(__SCREAMING_SNAKE_CASE)``, which
    referenced an undefined name instead of the downloaded file path.
    """
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    """Slow integration tests pinning reference logits for two released checkpoints.

    The obfuscated version named both methods ``_UpperCAmelCase`` (the second
    shadowed the first and neither was collected by unittest); names restored so
    the tests actually run.
    """

    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        # Reference values computed once from the released checkpoint.
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
# --- (removed non-code dataset-formatting residue) ---
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
a__ : Union[str, Any] = cst_fwd.get(__a , np.inf )
a__ : Dict = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
a__ : List[str] = new_cost_f
a__ : Optional[int] = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
a__ : Optional[Any] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCamelCase_(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """Bidirectional Dijkstra: shortest distance from *source* to *destination*, or -1 if unreachable.

    NOTE(review): the upstream name for this function is likely ``bidirectional_dij``;
    the obfuscated name is kept since no visible caller grounds a rename.
    Fixes from the obfuscated version: all four parameters shared one name (a
    SyntaxError), and ``queue.get()``'s (priority, node) tuple was assigned to a
    single variable instead of being unpacked.
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        # Pop the closest unsettled node in each direction.
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd,
            queue_forward, parent_forward, shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd,
            queue_backward, parent_backward, shortest_distance,
        )

        # Standard stopping rule: the frontiers can no longer improve the meeting point.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Demo graphs for the bidirectional search. The obfuscated version bound both
# dictionaries to the same name (`UpperCamelCase`), so the forward graph was
# silently overwritten by the backward one; distinct names restored.
# NOTE(review): names follow the upstream module (`graph_fwd`/`graph_bwd`) — confirm.
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
# --- (removed non-code dataset-formatting residue) ---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Base structure: configuration and the slow tokenizer are always importable.
# The obfuscated version bound this dict (and each framework list) to throwaway
# names, so `_import_structure` — passed to _LazyModule below — was undefined.
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; the bare assignment in the
    # obfuscated version dropped the `sys.modules[__name__] = ...` write,
    # leaving `import sys` unused and the lazy import mechanism inert.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- (removed non-code dataset-formatting residue) ---
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# The tokenizer class below reads these three constants by name; the obfuscated
# version bound them all to `A`, leaving the real names undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

# Maximum sequence length each released checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
class A(PreTrainedTokenizerFast):
    """Fast GPT-2 tokenizer (byte-level BPE), backed by HuggingFace *tokenizers*.

    Reconstructed from the obfuscated version, which had duplicate parameter
    names in every method (a SyntaxError), class attributes all bound to one
    name (so only the last survived), and an undefined base class. The base is
    grounded by the ``PreTrainedTokenizerFast`` import at the top of the file;
    ``_batch_encode_plus``/``_encode_plus`` are grounded by the ``super()``
    calls in their own bodies.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # NOTE(review): default for add_bos_token was an obfuscated placeholder;
        # False follows the upstream tokenizer — confirm.
        self.add_bos_token = kwargs.pop("add_bos_token", False)

        # Keep the backend pre-tokenizer's add_prefix_space flag in sync with ours.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, appending EOS after each turn and keeping the tail."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
# --- (removed non-code dataset-formatting residue) ---
from __future__ import annotations
def UpperCamelCase(__magic_name__: list[int]) -> list[int]:  # This function is recursive
    """Return some longest non-decreasing subsequence of *__magic_name__*.

    Recursive brute force: compare the best subsequence that starts with the
    first element against the best one that starts at the first element smaller
    than it, and keep the longer of the two. A sequence of length <= 1 is its
    own answer.
    """
    if len(__magic_name__) <= 1:
        return __magic_name__

    pivot = __magic_name__[0]
    best: list[int] = []

    # Only the FIRST element smaller than the pivot is explored (matching the
    # original's while-loop, which stopped as soon as it found one).
    for idx in range(1, len(__magic_name__)):
        if __magic_name__[idx] < pivot:
            candidate = UpperCamelCase([e for e in __magic_name__[idx:] if e >= __magic_name__[idx]])
            if len(candidate) > len(best):
                best = candidate
            break

    # Best subsequence that keeps the pivot as its first element.
    with_pivot = [pivot, *UpperCamelCase([e for e in __magic_name__[1:] if e >= pivot])]
    return with_pivot if len(with_pivot) > len(best) else best
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
# --- (removed non-code dataset-formatting residue) ---
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

# The tokenizer class below reads these three constants by name; the obfuscated
# version bound them to `lowerCAmelCase__`, leaving the real names undefined.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum sequence length the released checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class snake_case_(PreTrainedTokenizerFast):
    """Fast BlenderbotSmall tokenizer backed by a byte-level BPE model.

    Reconstructed from the obfuscated version, whose two methods were both
    named ``__A`` with duplicate parameter names (a SyntaxError) and whose base
    class was undefined. The base is grounded by the ``PreTrainedTokenizerFast``
    import at the top of the file.
    NOTE(review): method names follow the standard tokenizer overriding API
    (their bodies match those contracts exactly) — confirm against upstream.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one or two sequences in BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Return an all-zero token-type mask sized for the special-token layout."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
# --- (removed non-code dataset-formatting residue) ---
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
# The helper functions below log via `logger` and read
# `ParallelBackendConfig.backend_name`; the obfuscated version bound both
# to junk names, making them undefined at those call sites.
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    """Holds the joblib backend selected via the parallel-backend context manager.

    ``backend_name is None`` means "use multiprocessing.Pool" (the default).
    """

    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Dispatch a nested map either to multiprocessing.Pool or to the configured joblib backend.

    NOTE(review): the function name is inferred from the upstream `datasets`
    parallel module — confirm; its two callees are grounded by this body.
    Reconstructed because the obfuscated signature repeated one parameter name
    seven times (a SyntaxError).
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split *iterable* into contiguous slices and map them with a multiprocessing Pool.

    Reconstructed from the obfuscated version (duplicate parameter names were a
    SyntaxError). Each worker receives one (function, slice, types, rank,
    disable_tqdm, desc) tuple and runs ``single_map_nested_func`` on it.
    """
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        # The first `mod` slices get one extra element each.
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    # Sanity check: the slices must partition the input exactly.
    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's write lock with the workers so progress bars don't interleave.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    # Flatten the per-process result lists back into one list.
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map *iterable* through the configured joblib backend (one delayed call per item)."""
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Context manager routing `parallel_map` through the named joblib backend.

    Sets ``ParallelBackendConfig.backend_name`` for the duration of the block
    and always resets it to ``None`` on exit (the obfuscated version assigned
    to throwaway locals instead, so the setting never took effect).
    """
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
# --- (removed non-code dataset-formatting residue) ---
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
# NOTE(review): obfuscated names — upstream presumably used `logger` and
# `SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP`; nothing in this chunk reads either
# name, so the rename is left for a follow-up — confirm against upstream.
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)

# Map from released checkpoint name to the URL of its config.json.
SCREAMING_SNAKE_CASE : Dict = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}
class _lowerCamelCase(PretrainedConfig):
    """Configuration for the Swin2SR image super-resolution model.

    Reconstructed from the obfuscated version: every ``__init__`` parameter
    shared one name (a SyntaxError), values were stored in throwaway locals
    instead of on ``self``, and the base class was the undefined ``_a`` — the
    ``PretrainedConfig`` import at the top of the file grounds the base.
    Parameter order/defaults are kept positionally from the original signature.
    """

    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],  # noqa: B006 — read-only defaults, kept for interface compatibility
        num_heads=[6, 6, 6, 6, 6, 6],  # noqa: B006
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # One transformer stage per entry in `depths`.
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
# --- (removed non-code dataset-formatting residue) ---
"""A platform-independent file lock implementation (vendored from filelock 3.0.12)."""
import logging
import os
import threading
import time


# Optional platform/feature modules: each falls back to None when unavailable,
# and the lock classes below check for that. The obfuscated version bound the
# fallbacks to junk names, so e.g. `msvcrt` stayed undefined on non-Windows.
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    # Python 2 had no TimeoutError builtin; alias its closest ancestor.
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


# Lazily-created module logger; see logger() below, which caches into this name.
_logger = None
def logger():
    """Return the module-level logger, creating and caching it on first use.

    Renamed from the obfuscated ``lowerCamelCase``: the lock classes below call
    ``logger()``. Also restores the assignment to the declared ``_logger``
    global — the obfuscated version assigned to a local, so the ``global``
    statement was dead and the cache never populated.
    """
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired within the timeout.

    Renamed from the obfuscated ``snake_case``: ``BaseFileLock.acquire`` raises
    ``Timeout(self._lock_file)``, which grounds the name; ``__all__`` lists it.
    NOTE(review): the base was the undefined ``UpperCamelCase``; ``TimeoutError``
    (aliased above for Python 2) matches upstream filelock — confirm.
    """

    def __init__(self, lock_file):
        #: Path of the file the lock is protecting.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class snake_case :
def __init__( self ,UpperCAmelCase_ ) -> List[Any]:
lowercase__ = lock
return None
def __enter__( self ) -> Optional[int]:
return self.lock
def __exit__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> int:
self.lock.release()
return None
class snake_case :
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=-1 ,UpperCAmelCase_=None ) -> Tuple:
lowercase__ = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
lowercase__ = self.hash_filename_if_too_long(UpperCAmelCase_ ,UpperCAmelCase_ )
# The path to the lock file.
lowercase__ = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
lowercase__ = None
# The default timeout value.
lowercase__ = timeout
# We use this lock primarily for the lock counter.
lowercase__ = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
lowercase__ = 0
return None
@property
def _a ( self ) -> List[str]:
return self._lock_file
@property
def _a ( self ) -> Optional[int]:
return self._timeout
@timeout.setter
def _a ( self ,UpperCAmelCase_ ) -> Optional[Any]:
lowercase__ = float(UpperCAmelCase_ )
return None
def _a ( self ) -> Optional[Any]:
raise NotImplementedError()
def _a ( self ) -> Optional[int]:
raise NotImplementedError()
@property
def _a ( self ) -> Dict:
return self._lock_file_fd is not None
def _a ( self ,UpperCAmelCase_=None ,UpperCAmelCase_=0.05 ) -> Optional[Any]:
# Use the default timeout, if no timeout is provided.
if timeout is None:
lowercase__ = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lowercase__ = id(self )
lowercase__ = self._lock_file
lowercase__ = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(UpperCAmelCase_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
lowercase__ = max(0 ,self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def _a ( self ,UpperCAmelCase_=False ) -> List[Any]:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lowercase__ = id(self )
lowercase__ = self._lock_file
logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
lowercase__ = 0
logger().debug(F'''Lock {lock_id} released on {lock_filename}''' )
return None
    def __enter__( self ) -> Dict:
        """Context-manager entry: acquire with default settings, return self.

        NOTE(review): ``acquire`` was renamed to ``_a`` by the obfuscation,
        so this call raises AttributeError as written — confirm against the
        original module.
        """
        self.acquire()
        return self
def __exit__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> int:
self.release()
return None
def __del__( self ) -> Union[str, Any]:
self.release(force=UpperCAmelCase_ )
return None
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> str:
lowercase__ = os.path.basename(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > max_length and max_length > 0:
lowercase__ = os.path.dirname(UpperCAmelCase_ )
lowercase__ = str(hash(UpperCAmelCase_ ) )
lowercase__ = filename[: max_length - len(UpperCAmelCase_ ) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(UpperCAmelCase_ ,UpperCAmelCase_ )
else:
return path
class snake_case (UpperCamelCase ):
    """Windows file lock using ``msvcrt.locking`` on one byte of the file.

    NOTE(review): obfuscation renamed both hook methods to ``_a``
    (presumably ``_acquire``/``_release``), so the second definition shadows
    the first; the names are kept to preserve the visible interface.
    """

    def __init__( self ,lock_file ,timeout=-1 ,max_filename_length=None ) -> Dict:
        """Prefix the lock path with ``\\\\?\\`` to lift the MAX_PATH limit.

        NOTE(review): the original declared three parameters with a single
        name (a SyntaxError); the keyword names in the ``super().__init__``
        call pin the ones restored here.
        """
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file ,timeout=timeout ,max_filename_length=max_filename_length )
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file )

    def _a ( self ) -> List[str]:
        """Try to open and lock the file without blocking; remember the fd
        on success (silently give up on any OSError)."""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file ,open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd ,msvcrt.LK_NBLCK ,1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None

    def _a ( self ) -> Any:
        """Unlock, close the descriptor and best-effort delete the file."""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd ,msvcrt.LK_UNLCK ,1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class snake_case (UpperCamelCase ):
    """Unix file lock via ``fcntl.flock``.

    NOTE(review): obfuscation renamed both hook methods to ``_a``
    (presumably ``_acquire``/``_release``), so the second definition shadows
    the first; names kept to preserve the visible interface.
    """

    def __init__( self ,lock_file ,timeout=-1 ,max_filename_length=None ) -> int:
        """Cap the lock-file name length at the filesystem's ``f_namemax``.

        NOTE(review): the original declared three parameters with a single
        name (a SyntaxError); restored from the ``super().__init__`` call.
        The caller-supplied max length is deliberately overridden by the
        filesystem limit, matching the original statvfs lookup.
        """
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file ,timeout=timeout ,max_filename_length=max_filename_length )

    def _a ( self ) -> List[str]:
        """Open the lock file and take a non-blocking exclusive flock;
        remember the fd on success, close it on failure."""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file ,open_mode )
        try:
            fcntl.flock(fd ,fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None

    def _a ( self ) -> int:
        """Release the flock and close the descriptor."""
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd ,fcntl.LOCK_UN )
        os.close(fd )
        return None
class snake_case (UpperCamelCase ):
    """Fallback lock relying only on atomic ``O_EXCL`` file creation.

    NOTE(review): obfuscation renamed both hook methods to ``_a``
    (presumably ``_acquire``/``_release``); names kept as-is.
    """

    def _a ( self ) -> Optional[Any]:
        """Atomically create the lock file; success means we hold the lock.

        BUG FIX: the obfuscated original bound the descriptor to a dead
        local and then read an undefined ``fd`` (NameError); bind it and
        store it on the instance so ``is_locked`` can observe it.
        """
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file ,open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _a ( self ) -> Tuple:
        """Close the descriptor and delete the lock file, ignoring a racing
        delete by another process."""
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Pick the strongest lock implementation available on this platform:
# msvcrt (Windows) > fcntl (Unix) > soft O_EXCL-based fallback.
SCREAMING_SNAKE_CASE__ = None
if msvcrt:
    SCREAMING_SNAKE_CASE__ = WindowsFileLock
elif fcntl:
    SCREAMING_SNAKE_CASE__ = UnixFileLock
else:
    SCREAMING_SNAKE_CASE__ = SoftFileLock
    # warnings may have been nulled out at import time in the original module;
    # only then is the "soft lock only" caveat emitted.
    if warnings is not None:
        warnings.warn("only soft file lock is available")
# NOTE(review): WindowsFileLock / UnixFileLock / SoftFileLock are not defined
# under those names in this chunk — the obfuscation renamed each class to
# ``snake_case`` — so this selection raises NameError as written; confirm
# against the original module.
# (removed dataset-dump artifact: stray markdown table cells)
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a__ ( UpperCAmelCase ):
    """Seq2SeqTrainer variant for generative question answering.

    Runs generation-based evaluation/prediction, then post-processes the raw
    model output into answers before computing metrics.

    NOTE(review): obfuscation left both public methods named ``_lowercase``
    (originally ``evaluate`` and ``predict``), so the second definition
    shadows the first; the names are kept to preserve the external
    interface. Parameter names are restored from how each body consumes
    them — every original signature declared all parameters with a single
    duplicated name, which is a SyntaxError.
    """

    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ) ->Dict:
        """Store the raw eval examples and the post-processing hook.

        Args:
            eval_examples: un-tokenized examples matching the eval dataset.
            post_process_function: callable turning raw predictions into
                (predictions, references) for ``compute_metrics``.
        """
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def _lowercase ( self , eval_dataset: Optional[Dataset] = None , eval_examples=None , ignore_keys: Optional[List[str]] = None , metric_key_prefix: str = "eval" , **gen_kwargs , ) ->Dict[str, float]:
        """Generation-based ``evaluate``: run the eval loop with metric
        computation deferred, post-process, then compute + log metrics."""
        gen_kwargs = gen_kwargs.copy()
        # Fall back to the generation settings configured on TrainingArguments.
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            # Always restore the metric function, even if the loop raised.
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"{metric_key_prefix}_" ):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics

    def _lowercase ( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix: str = "test" , **gen_kwargs ) ->Optional[int]:
        """Generation-based ``predict``: like evaluate but over a test set;
        returns a ``PredictionOutput`` with post-processed predictions."""
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , """predict""" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"{metric_key_prefix}_" ):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
# (removed dataset-dump artifact: stray markdown table cells)
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class a__ ( UpperCAmelCase ):
    """Integration tests for ``RagRetriever`` over canonical HF, custom HF
    (in-memory and on-disk) and legacy FAISS indexes.

    NOTE(review): obfuscation renamed every method to ``_lowercase`` (later
    definitions shadow earlier ones, and the original setUp/tearDown/getter
    names are lost) and rebound most locals and instance attributes to dead
    ``SCREAMING_SNAKE_CASE`` names while the code still reads the original
    names (e.g. ``self.tmpdirname``, ``vocab_tokens``, ``dataset``,
    ``retriever``). Likewise, bare ``UpperCAmelCase__`` arguments stand in
    for values the obfuscation erased (e.g. ``exist_ok=True``,
    ``from_disk=True/False``). The class must be repaired against the
    upstream test file before it can run; only documentation and the
    syntactically illegal annotations on tuple-unpacking targets are
    touched here.
    """

    def _lowercase ( self : Optional[int] ) ->Optional[int]:
        """setUp: build a temp dir holding DPR (WordPiece) and BART (BPE)
        tokenizer files used by the dummy retrievers below."""
        SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE : Optional[Any] = 8
        # DPR tok
        SCREAMING_SNAKE_CASE : List[str] = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE : Any = os.path.join(UpperCAmelCase__ , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        # BART tok
        SCREAMING_SNAKE_CASE : List[str] = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        SCREAMING_SNAKE_CASE : List[Any] = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
        SCREAMING_SNAKE_CASE : Dict = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        SCREAMING_SNAKE_CASE : List[Any] = {"""unk_token""": """<unk>"""}
        SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , """bart_tokenizer""" )
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(UpperCAmelCase__ , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
        SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(UpperCAmelCase__ , BART_VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(UpperCAmelCase__ ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(UpperCAmelCase__ ) )

    def _lowercase ( self : Optional[Any] ) ->DPRQuestionEncoderTokenizer:
        """Load the DPR question-encoder tokenizer written during setUp."""
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )

    def _lowercase ( self : Optional[Any] ) ->DPRContextEncoderTokenizer:
        """Load the DPR context-encoder tokenizer written during setUp."""
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )

    def _lowercase ( self : Optional[int] ) ->BartTokenizer:
        """Load the BART tokenizer written during setUp."""
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )

    def _lowercase ( self : Any ) ->List[str]:
        """tearDown: delete the temporary directory."""
        shutil.rmtree(self.tmpdirname )

    def _lowercase ( self : Tuple ) ->Union[str, Any]:
        """Build a two-document dummy dataset with a flat FAISS
        inner-product index over its embeddings."""
        SCREAMING_SNAKE_CASE : str = Dataset.from_dict(
            {
                """id""": ["""0""", """1"""],
                """text""": ["""foo""", """bar"""],
                """title""": ["""Foo""", """Bar"""],
                """embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
            } )
        dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
        return dataset

    def _lowercase ( self : Union[str, Any] ) ->Tuple:
        """Build a RagRetriever over the canonical HF index, patching
        ``load_dataset`` so the dummy dataset is used."""
        SCREAMING_SNAKE_CASE : int = self.get_dummy_dataset()
        SCREAMING_SNAKE_CASE : str = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
            SCREAMING_SNAKE_CASE : Tuple = dataset
            SCREAMING_SNAKE_CASE : Union[str, Any] = RagRetriever(
                UpperCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever

    def _lowercase ( self : List[Any] , UpperCAmelCase__ : bool ) ->Union[str, Any]:
        """Build a RagRetriever over a custom HF index, either loaded from
        disk (dataset + faiss index saved first) or held in memory."""
        SCREAMING_SNAKE_CASE : Any = self.get_dummy_dataset()
        SCREAMING_SNAKE_CASE : Dict = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""custom""" , )
        if from_disk:
            SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """dataset""" )
            SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , """index.faiss""" )
            dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname , """index.faiss""" ) )
            dataset.drop_index("""embeddings""" )
            dataset.save_to_disk(os.path.join(self.tmpdirname , """dataset""" ) )
            del dataset
            SCREAMING_SNAKE_CASE : Any = RagRetriever(
                UpperCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            SCREAMING_SNAKE_CASE : Union[str, Any] = RagRetriever(
                UpperCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase__ ) , )
        return retriever

    def _lowercase ( self : int ) ->int:
        """Build a RagRetriever over a legacy index: the dummy dataset is
        dumped to the pickled index/meta/passages files the legacy loader
        expects (note the +1 on the vector size for the legacy layout)."""
        SCREAMING_SNAKE_CASE : Optional[Any] = Dataset.from_dict(
            {
                """id""": ["""0""", """1"""],
                """text""": ["""foo""", """bar"""],
                """title""": ["""Foo""", """Bar"""],
                """embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
        SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , """hf_bert_base.hnswSQ8_correct_phi_128.c_index""" )
        dataset.save_faiss_index("""embeddings""" , index_file_name + """.index.dpr""" )
        pickle.dump(dataset["""id"""] , open(index_file_name + """.index_meta.dpr""" , """wb""" ) )
        SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , """psgs_w100.tsv.pkl""" )
        SCREAMING_SNAKE_CASE : Optional[Any] = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset}
        pickle.dump(UpperCAmelCase__ , open(UpperCAmelCase__ , """wb""" ) )
        SCREAMING_SNAKE_CASE : str = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""legacy""" , index_path=self.tmpdirname , )
        SCREAMING_SNAKE_CASE : Any = RagRetriever(
            UpperCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever

    def _lowercase ( self : Dict ) ->Optional[int]:
        """Canonical index: retrieve() returns embeddings/doc dicts/ids
        ordered by inner product with the query vectors."""
        SCREAMING_SNAKE_CASE : Any = 1
        SCREAMING_SNAKE_CASE : str = self.get_dummy_canonical_hf_index_retriever()
        SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        # annotation removed: an annotation on a tuple-unpacking target is a SyntaxError
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = retriever.retrieve(UpperCAmelCase__ , n_docs=UpperCAmelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""id"""] ) , UpperCAmelCase__ )
        self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _lowercase ( self : List[str] ) ->int:
        """Canonical index: save_pretrained / from_pretrained round-trip
        keeps the retriever usable."""
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
                SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_dataset()
                retriever.save_pretrained(UpperCAmelCase__ )
                SCREAMING_SNAKE_CASE : List[Any] = RagRetriever.from_pretrained(UpperCAmelCase__ )
                self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
                SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
                    [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
                SCREAMING_SNAKE_CASE : Dict = retriever.retrieve(UpperCAmelCase__ , n_docs=1 )
                self.assertTrue(out is not None )

    def _lowercase ( self : List[Any] ) ->List[str]:
        """Custom index: retrieve() behaves like the canonical case."""
        SCREAMING_SNAKE_CASE : Tuple = 1
        SCREAMING_SNAKE_CASE : str = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE : str = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        # annotation removed: an annotation on a tuple-unpacking target is a SyntaxError
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = retriever.retrieve(UpperCAmelCase__ , n_docs=UpperCAmelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""id"""] ) , UpperCAmelCase__ )
        self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _lowercase ( self : Optional[int] ) ->Optional[Any]:
        """Custom index: save/load round-trip keeps the retriever usable."""
        SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE : Optional[Any] = RagRetriever.from_pretrained(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            SCREAMING_SNAKE_CASE : int = retriever.retrieve(UpperCAmelCase__ , n_docs=1 )
            self.assertTrue(out is not None )

    def _lowercase ( self : Dict ) ->Tuple:
        """Custom index loaded from disk: retrieve() behaves identically."""
        SCREAMING_SNAKE_CASE : Any = 1
        SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE : Optional[Any] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        # annotation removed: an annotation on a tuple-unpacking target is a SyntaxError
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = retriever.retrieve(UpperCAmelCase__ , n_docs=UpperCAmelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""id"""] ) , UpperCAmelCase__ )
        self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _lowercase ( self : Union[str, Any] ) ->Any:
        """Custom index loaded from disk: save/load round-trip works."""
        SCREAMING_SNAKE_CASE : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE : Optional[int] = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            SCREAMING_SNAKE_CASE : Optional[Any] = retriever.retrieve(UpperCAmelCase__ , n_docs=1 )
            self.assertTrue(out is not None )

    def _lowercase ( self : Tuple ) ->str:
        """Legacy index: retrieve() returns text/title doc dicts ordered by
        inner product (no embeddings/id keys in the legacy layout)."""
        SCREAMING_SNAKE_CASE : Tuple = 1
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_legacy_index_retriever()
        SCREAMING_SNAKE_CASE : int = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        # annotation removed: an annotation on a tuple-unpacking target is a SyntaxError
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = retriever.retrieve(UpperCAmelCase__ , n_docs=UpperCAmelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""text"""] ) , UpperCAmelCase__ )
        self.assertEqual(doc_dicts[0]["""text"""][0] , """bar""" ) # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""text"""][0] , """foo""" ) # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _lowercase ( self : Any ) ->str:
        """Legacy index: save/load round-trip keeps the retriever usable."""
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE : Union[str, Any] = RagRetriever.from_pretrained(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE : str = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            SCREAMING_SNAKE_CASE : Dict = retriever.retrieve(UpperCAmelCase__ , n_docs=1 )
            self.assertTrue(out is not None )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def _lowercase ( self : Tuple ) ->Tuple:
        """Calling the retriever returns numpy arrays by default and torch
        tensors when return_tensors='pt' is requested."""
        import torch

        SCREAMING_SNAKE_CASE : int = 1
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
        SCREAMING_SNAKE_CASE : str = [[5, 7], [1_0, 1_1]]
        SCREAMING_SNAKE_CASE : Tuple = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        SCREAMING_SNAKE_CASE : int = retriever(UpperCAmelCase__ , UpperCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase__ )
        # annotation removed: an annotation on a tuple-unpacking target is a SyntaxError
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (
            out["""context_input_ids"""],
            out["""context_attention_mask"""],
            out["""retrieved_doc_embeds"""],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
        SCREAMING_SNAKE_CASE : Union[str, Any] = retriever(
            UpperCAmelCase__ , UpperCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase__ , return_tensors="""pt""" , )
        # annotation removed: an annotation on a tuple-unpacking target is a SyntaxError
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ( # noqa: F841
            out["""context_input_ids"""],
            out["""context_attention_mask"""],
            out["""retrieved_doc_embeds"""],
            out["""doc_ids"""],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
        self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
        self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def _lowercase ( self : Union[str, Any] ) ->Optional[Any]:
        """With a context-encoder tokenizer set, the retriever output also
        contains tokenized documents (6 keys in total)."""
        SCREAMING_SNAKE_CASE : Dict = self.get_dpr_ctx_encoder_tokenizer()
        SCREAMING_SNAKE_CASE : Optional[int] = 1
        SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
        retriever.set_ctx_encoder_tokenizer(UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE : List[str] = [[5, 7], [1_0, 1_1]]
        SCREAMING_SNAKE_CASE : Optional[int] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        SCREAMING_SNAKE_CASE : Union[str, Any] = retriever(UpperCAmelCase__ , UpperCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase__ )
        self.assertEqual(
            len(UpperCAmelCase__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) , UpperCAmelCase__ ) # check for doc token related keys in dictionary.
# (removed dataset-dump artifact: stray markdown table cells)
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class a_ ( unittest.TestCase ):
    """Unit tests for the ``Vector`` / ``Matrix`` implementations in ``.lib``.

    NOTE(review): obfuscation renamed every test method to the same name
    ``UpperCamelCase_`` — later definitions shadow earlier ones, and since
    no name starts with ``test``, unittest currently collects nothing.
    Restoring distinct ``test_*`` names would change the public method
    names, so it is only flagged here. The bodies below repair the
    obfuscation damage (locals bound to dead names and mangled
    ``__UpperCamelCase`` references, both NameErrors at runtime).
    """

    def UpperCamelCase_ ( self ):
        """component() indexes entries; the no-arg constructor must not raise."""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def UpperCamelCase_ ( self ):
        """__str__ renders the vector as a parenthesised comma list."""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def UpperCamelCase_ ( self ):
        """len() reports the number of components."""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def UpperCamelCase_ ( self ):
        """euclidean_length() on short, long, zero and mixed-sign vectors."""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def UpperCamelCase_ ( self ):
        """Vector addition is component-wise."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def UpperCamelCase_ ( self ):
        """Vector subtraction is component-wise."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def UpperCamelCase_ ( self ):
        """Scalar multiplication scales; vector * vector is the dot product."""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual(a * b, 0)

    def UpperCamelCase_ ( self ):
        """zero_vector(n) contains n zero components."""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def UpperCamelCase_ ( self ):
        """unit_basis_vector(n, i) has a 1 only at index i."""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def UpperCamelCase_ ( self ):
        """axpy(a, x, y) computes a*x + y."""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def UpperCamelCase_ ( self ):
        """copy() yields an equal vector."""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def UpperCamelCase_ ( self ):
        """change_component() mutates a single entry in place."""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def UpperCamelCase_ ( self ):
        """Matrix.__str__ renders rows as |a,b,c| lines."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def UpperCamelCase_ ( self ):
        """minor(x, y) matches the hand-computed minors matrix."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def UpperCamelCase_ ( self ):
        """cofactor(x, y) matches the hand-computed cofactors matrix."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def UpperCamelCase_ ( self ):
        """determinant() of the fixed 3x3 matrix is -5."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def UpperCamelCase_ ( self ):
        """Matrix * vector and matrix * scalar multiplication."""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def UpperCamelCase_ ( self ):
        """change_component() mutates a single matrix entry in place."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def UpperCamelCase_ ( self ):
        """component() reads a single matrix entry."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        # BUG FIX: the original called assertEqual(7, ..., 0.01) — the third
        # positional argument of assertEqual is the failure *message*, not a
        # tolerance; assertAlmostEqual with an explicit delta is what was meant.
        self.assertAlmostEqual(a.component(2, 1), 7, delta=0.01)

    def UpperCamelCase_ ( self ):
        """Matrix addition is component-wise."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def UpperCamelCase_ ( self ):
        """Matrix subtraction is component-wise."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def UpperCamelCase_ ( self ):
        """square_zero_matrix(n) is an n x n matrix of zeros."""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5)), )
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    # BUG FIX: a dataset-dump artifact ("| 287 |") was fused onto this line,
    # making it a SyntaxError; the artifact is removed.
    unittest.main()
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
# Ordered key-rewrite rules: each [tf_substring, hf_substring] pair maps a
# fragment of a TF/pegasus checkpoint state-dict key onto the Hugging Face
# (BART-like) naming scheme. NOTE(review): downstream code iterates a global
# named PATTERNS; the obfuscation renamed it to ``A`` — confirm against the
# original conversion script.
A : Any = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ['''memory_attention''', '''encoder_attn'''],
    ['''attention''', '''attn'''],
    ['''/''', '''.'''],
    ['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
    ['''.LayerNorm.beta''', '''_layer_norm.bias'''],
    ['''r.layer_''', '''r.layers.'''],
    ['''output_proj''', '''out_proj'''],
    ['''ffn.dense_1.''', '''fc2.'''],
    ['''ffn.dense.''', '''fc1.'''],
    ['''ffn_layer_norm''', '''final_layer_norm'''],
    ['''kernel''', '''weight'''],
    ['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
    ['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
    ['''embeddings.weights''', '''shared.weight'''],
]
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[Any]:
    """Translate a pegasus TF state-dict key into its Hugging Face
    equivalent by applying every substitution pair in ``A`` (the renamed
    PATTERNS table) in order.

    Args:
        SCREAMING_SNAKE_CASE_: the original TF state-dict key.

    Returns:
        The rewritten key string.
    """
    # BUG FIX: the obfuscated original looped over an undefined global
    # ``PATTERNS`` (renamed to ``A``) and bound each replacement to a dead
    # local while returning an undefined ``k`` (NameError). Thread the key
    # through the replacements instead.
    k = SCREAMING_SNAKE_CASE_
    for pegasus_name, hf_name in A:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration and load converted TF weights into it.

    Args:
        tf_weights: mapping of TF variable name -> numpy array.
        cfg_updates: config overrides merged on top of DEFAULTS.

    Returns:
        The torch model with the converted state dict loaded.

    Raises:
        ValueError: if a converted key is absent from the torch state dict.
    """
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        # Dense/projection kernels are stored transposed in TF relative to torch.
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    # Encoder/decoder token embeddings are tied to the shared embedding.
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    # TF Pegasus has no biases for these parameters; fill with zeros.
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    """Read every variable of a TF checkpoint into a {name: numpy array} dict.

    Optimizer slots ("Adafactor") and the "global_step" counter are skipped.
    """
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    """Convert one fine-tuned Pegasus TF checkpoint to a PyTorch checkpoint.

    The dataset name is taken from the checkpoint's parent directory and used to
    look up task-specific config overrides. Tokenizer and model are both written
    to `save_dir`.
    """
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        # keep the full task table inside the config of the generic checkpoint
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    # Positional embeddings are static (sinusoidal) and re-created at load time.
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        # Default the output directory to pegasus/<dataset name>.
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

# Canonical checkpoint -> config URL map for FocalNet.
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a FocalNet model.

    Stores the architecture hyper-parameters (patching, per-stage depths, focal
    levels/windows, regularization) and the backbone stage selection handled by
    `BackboneConfigMixin`.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # One named stage per entry in `depths`, plus the stem.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 406 |
"""simple docstring"""
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort a list of numbers with bucket sort.

    One bucket per unit of the value range; each bucket is sorted with the
    built-in sort and the buckets are concatenated in order.

    Args:
        my_list: numbers to sort (ints or floats). May be empty.

    Returns:
        A new sorted list; the input is not modified.
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for value in my_list:
        buckets[int(value - min_value)].append(value)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Smoke checks: positive-only input and mixed negative/positive input.
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 406 | 1 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a_ = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
a_ = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def onnx_compliancy(saved_model_path, strict, opset):
    """Check that all ops of a TF SavedModel are supported by a given ONNX opset.

    Args:
        saved_model_path: path to the SavedModel protobuf (.pb file).
        strict: when True, raise instead of only printing on incompatible ops.
        opset: highest ONNX opset whose op lists are accepted.

    Raises:
        Exception: in strict mode, when at least one op is not ONNX-convertible.
    """
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    # Accumulate every op supported up to (and including) the requested opset.
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        # Join explicitly: concatenating a list onto a str would raise TypeError.
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 339 |
'''simple docstring'''
def _lowerCAmelCase (_lowercase = 3 , _lowercase = 7 , _lowercase = 1_00_00_00 ):
"""simple docstring"""
a__ = 0
a__ = 1
for current_denominator in range(1 , limit + 1 ):
a__ = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
a__ = current_numerator
a__ = current_denominator
return max_numerator
if __name__ == "__main__":
    # Project Euler entry point: left neighbour of 3/7 among denominators <= 1e6.
    print(solution(numerator=3, denominator=7, limit=1_000_000))
| 331 | 0 |
'''simple docstring'''
class SubArray:
    """Maximum sub-array sum over a comma-separated string of numbers."""

    def __init__(self, arr):
        # Keep the raw tokens; they are converted with int() on demand.
        self.array = arr.split(",")

    def solve_sub_array(self):
        """Return the maximum contiguous sub-array sum (Kadane-style DP).

        `sum_value[i]` is the best sum of a sub-array ending at i;
        `rear[i]` is the best sum seen anywhere in the prefix [0..i].
        """
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    # Read a comma-separated list of numbers and print the best sub-array sum.
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    result = array.solve_sub_array()
    print(("the results is:", result))
| 79 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature for translations with a fixed set of languages per example.

    Attributes:
        languages: language codes present in every example.
        id: optional feature identifier.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # Arrow storage: one string column per language, in sorted order.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        """Flatten the feature into one string Value per language."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
# Automatically constructed
SCREAMING_SNAKE_CASE__ = "dict"
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = field(default='''TranslationVariableLanguages''' , init=lowercase_ , repr=lowercase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(set(self.languages ) ) if self.languages else None
SCREAMING_SNAKE_CASE : str = len(self.languages ) if self.languages else None
def __call__( self : Tuple ):
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = set(self.languages )
if self.languages and set(lowerCamelCase_ ) - lang_set:
raise ValueError(
f'''Some languages in example ({", ".join(sorted(set(lowerCamelCase_ ) - lang_set ) )}) are not in valid set ({", ".join(lowerCamelCase_ )}).''' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
SCREAMING_SNAKE_CASE : List[Any] = []
for lang, text in translation_dict.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = zip(*sorted(lowerCamelCase_ ) )
return {"language": languages, "translation": translations}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
| 79 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
# Module logger (follows the transformers logging convention).
logger = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    """Deprecated alias of LayoutLMvaImageProcessor kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit a FutureWarning so callers migrate before v5 removes this class.
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical checkpoint -> config URL map for ViViT.
VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    """Configuration for a ViViT video transformer model.

    Stores the video patching parameters (frames, tubelet size) and the usual
    transformer hyper-parameters.
    """

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Mock model whose forward takes all generated inputs contiguously."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    """Mock model with a non-generated parameter interleaved among the inputs."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    """Tests for the legacy convert_graph_to_onnx export/quantization helpers."""

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export `model` to ONNX in a fresh temp path; return the output path."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        # Define ids as they first come in the forward definition
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 607 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make ops deterministic so generated images are reproducible across runs.
enable_full_determinism()
class UpperCamelCase_ ( snake_case_ , unittest.TestCase ):
    """Fast (CPU-sized) tests for the Kandinsky image-to-image pipeline.

    NOTE(review): identifiers in this block look machine-mangled -- every method
    is named `_UpperCamelCase` (so later defs shadow earlier ones), the mixin
    base `snake_case_` is unresolved here, and `lowerCAmelCase` is reassigned
    four times. Restore the original names before relying on these tests.
    """
    # Pipeline class under test, required/batch args and callback params
    # (these four reassignments clobber each other as written -- see NOTE above).
    lowerCAmelCase = KandinskyImgaImgPipeline
    lowerCAmelCase = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
    lowerCAmelCase = [
        '''prompt''',
        '''negative_prompt''',
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
    ]
    lowerCAmelCase = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''negative_prompt''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    lowerCAmelCase = False

    # Tiny model dimensions for fast tests (names mangled; presumably
    # text-embedder size, time dims, etc. -- confirm against the original file).
    @property
    def _UpperCamelCase ( self ) -> Optional[int]:
        return 32

    @property
    def _UpperCamelCase ( self ) -> Any:
        return 32

    @property
    def _UpperCamelCase ( self ) -> Dict:
        return self.time_input_dim

    @property
    def _UpperCamelCase ( self ) -> Any:
        return self.time_input_dim * 4

    @property
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        return 1_00

    @property
    def _UpperCamelCase ( self ) -> List[str]:
        # Tiny multilingual CLIP tokenizer for the text encoder below.
        snake_case_ = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
        return tokenizer

    @property
    def _UpperCamelCase ( self ) -> Optional[Any]:
        torch.manual_seed(0 )
        snake_case_ = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
        snake_case_ = MultilingualCLIP(a )
        snake_case_ = text_encoder.eval()

        return text_encoder

    @property
    def _UpperCamelCase ( self ) -> str:
        torch.manual_seed(0 )
        # Tiny UNet conditioned on text+image embeddings (out_channels doubled
        # because the model predicts mean and variance).
        snake_case_ = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }

        snake_case_ = UNetaDConditionModel(**a )
        return model

    @property
    def _UpperCamelCase ( self ) -> str:
        # Tiny VQ decoder (movq) configuration.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def _UpperCamelCase ( self ) -> Optional[int]:
        torch.manual_seed(0 )
        snake_case_ = VQModel(**self.dummy_movq_kwargs )
        return model

    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # Assemble all tiny components plus a DDIM scheduler for the pipeline.
        snake_case_ = self.dummy_text_encoder
        snake_case_ = self.dummy_tokenizer
        snake_case_ = self.dummy_unet
        snake_case_ = self.dummy_movq

        snake_case_ = {
            'num_train_timesteps': 10_00,
            'beta_schedule': 'linear',
            'beta_start': 0.00_085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }

        snake_case_ = DDIMScheduler(**a )

        snake_case_ = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }

        return components

    def _UpperCamelCase ( self , a , a=0 ) -> str:
        # Deterministic dummy inputs: random embeddings plus a 256x256 init image.
        snake_case_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a ) ).to(a )
        snake_case_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a )
        # create init_image
        snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(a ) ).to(a )
        snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case_ = Image.fromarray(np.uinta(a ) ).convert('RGB' ).resize((2_56, 2_56) )

        if str(a ).startswith('mps' ):
            snake_case_ = torch.manual_seed(a )
        else:
            snake_case_ = torch.Generator(device=a ).manual_seed(a )
        snake_case_ = {
            'prompt': 'horse',
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs

    def _UpperCamelCase ( self ) -> int:
        # End-to-end smoke test on CPU comparing a 3x3 corner slice of the
        # output image against a recorded reference.
        snake_case_ = 'cpu'

        snake_case_ = self.get_dummy_components()

        snake_case_ = self.pipeline_class(**a )
        snake_case_ = pipe.to(a )

        pipe.set_progress_bar_config(disable=a )

        snake_case_ = pipe(**self.get_dummy_inputs(a ) )
        snake_case_ = output.images

        snake_case_ = pipe(
            **self.get_dummy_inputs(a ) , return_dict=a , )[0]

        snake_case_ = image[0, -3:, -3:, -1]
        snake_case_ = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        snake_case_ = np.array(
            [0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )

        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
    """GPU integration test: full Kandinsky prior + img2img run against a
    recorded reference image.

    NOTE(review): method and local names are machine-mangled here (both methods
    are `_UpperCamelCase`; the first is clearly a `tearDown`) -- restore the
    original identifiers before relying on this class.
    """

    def _UpperCamelCase ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Reference output and input image fetched from the hub.
        snake_case_ = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy' )

        snake_case_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        snake_case_ = 'A red cartoon frog, 4k'

        snake_case_ = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
        pipe_prior.to(a )

        snake_case_ = KandinskyImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
        snake_case_ = pipeline.to(a )

        pipeline.set_progress_bar_config(disable=a )

        snake_case_ = torch.Generator(device='cpu' ).manual_seed(0 )
        snake_case_ , snake_case_ = pipe_prior(
            a , generator=a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()

        snake_case_ = pipeline(
            a , image=a , image_embeds=a , negative_image_embeds=a , generator=a , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='np' , )

        snake_case_ = output.images[0]

        assert image.shape == (7_68, 7_68, 3)

        assert_mean_pixel_difference(a , a )
| 607 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
# Module logger; the benchmark-args class below warns through it.
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    """Benchmark arguments specific to the PyTorch backend.

    Accepts the legacy negated flags in `deprecated_args` and converts each to
    its positive counterpart at construction time, with a deprecation warning.
    """

    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated `no_*` kwargs into positive flags, then init normally."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the leading "no_"
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fpaa_opt_level = kwargs.pop("fp16_opt_level", self.fpaa_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fpaa_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Choose the torch device and GPU count once; cached for the properties below."""
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 592 |
def solution(power: int = 1000) -> int:
    """Project Euler 16: sum of the decimal digits of 2**power.

    Args:
        power: non-negative exponent.

    Returns:
        The digit sum of 2**power.
    """
    # Python big ints make this exact for any power; sum digits of the decimal form.
    return sum(int(digit) for digit in str(2**power))
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 592 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase(a, b) -> np.ndarray:
    """Pairwise squared Euclidean distances between rows of ``a`` and rows of ``b``.

    Uses the ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2 expansion. Returns an
    (len(a), len(b)) matrix. The original signature reused one name for both
    parameters, which is a SyntaxError in Python; distinct names restore it.
    """
    bt = b.T
    a_sq = np.sum(np.square(a), axis=1)
    b_sq = np.sum(np.square(bt), axis=0)
    ab = np.matmul(a, bt)
    return a_sq[:, None] - 2 * ab + b_sq[None, :]


# The color-quantization helper in this file calls this function by its
# original, un-obfuscated name; keep that binding available.
squared_euclidean_distance = __UpperCAmelCase
def __UpperCAmelCase(x, clusters) -> np.ndarray:
    """Map every pixel of ``x`` (..., 3) to the index of its nearest cluster color.

    The original signature reused one name for both parameters (a SyntaxError)
    and called ``squared_euclidean_distance``, a name the obfuscation removed;
    the distance computation is inlined here so this helper is self-contained.
    """
    pixels = x.reshape(-1, 3)
    ct = clusters.T
    px_sq = np.sum(np.square(pixels), axis=1)
    cl_sq = np.sum(np.square(ct), axis=0)
    d = px_sq[:, None] - 2 * np.matmul(pixels, ct) + cl_sq[None, :]
    return np.argmin(d, axis=1)


# The image processor below calls this helper by its original name.
color_quantize = __UpperCAmelCase
class __A(A_):
    """ImageGPT-style image processor.

    Resizes images, rescales pixel values from [0, 255] to [-1, 1], and
    (optionally) color-quantizes every pixel to the index of its nearest
    ``clusters`` palette entry.

    NOTE(review): the obfuscation collapsed all three method names and every
    parameter name into single identifiers (a SyntaxError); the names below are
    restored from the call sites inside ``preprocess`` (``self.resize``,
    ``self.normalize``) and from the attribute reads in the bodies.
    """

    lowerCAmelCase: Tuple = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        """Store defaults; ``size`` falls back to the model's 256x256 canvas."""
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size`` (requires both ``height`` and ``width``)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""")
        # ``resize`` here resolves to the module-level image_transforms helper.
        return resize(
            image, size=(size['height'], size['width']), resample=resample, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Rescale pixel values from [0, 255] to [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Prepare a batch of images; returns a BatchFeature keyed ``input_ids``.

        When quantizing, the output per image is the flattened sequence of
        cluster indices; otherwise the (possibly resized/normalized) pixels in
        ``data_format`` layout.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        # Convert lazily: keep None as-is so the guard below can still fire
        # (np.array(None) is a 0-d object array, which would defeat the
        # ``clusters is None`` check).
        clusters = np.array(clusters) if clusters is not None else None
        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        # Parenthesized so size/resample are only required when resizing is on;
        # the un-parenthesized original raised whenever resample was None.
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_color_quantize and clusters is None:
            raise ValueError('Clusters must be specified if do_color_quantize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'input_ids': images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    # Backward-compatible alias for the obfuscated public name of ``preprocess``.
    UpperCAmelCase = preprocess
| 706 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# NOTE(review): these four assignments presumably patched distinct lookup
# targets (e.g. ``data_utils.Vocab``, ``sys.modules`` entries) so old pickles
# resolve; obfuscation collapsed the target names into ``lowerCAmelCase_`` so
# only the last binding survives — confirm against the un-mangled script.
lowerCAmelCase_ = data_utils.TransfoXLTokenizer
lowerCAmelCase_ = data_utils.TransfoXLCorpus
lowerCAmelCase_ = data_utils
lowerCAmelCase_ = data_utils
def __UpperCAmelCase(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file) -> Dict:
    """Convert a TF Transformer-XL checkpoint and/or a pickled corpus to PyTorch files.

    The original obfuscated signature reused one name for all four parameters
    (a SyntaxError); the names here are exactly the ones the body reads, in the
    order the CLI entry point below passes them.
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, '''rb''' ) as fp:
            corpus = pickle.load(fp, encoding='''latin1''' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
        print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('''vocab''', None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
        print(f"""Save dataset to {pytorch_dataset_dump_path}""" )
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"""Building PyTorch model from configuration: {config}""" )
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
        with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )


# The CLI entry point calls the conversion by its descriptive name.
convert_transfo_xl_checkpoint_to_pytorch = __UpperCAmelCase
if __name__ == "__main__":
    # The obfuscation dropped the ``parser``/``args`` bindings (NameErrors);
    # restore them and call the conversion function by its module-level name.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the folder to store the PyTorch model or dataset/vocab.',
    )
    parser.add_argument(
        '--tf_checkpoint_path',
        default='',
        type=str,
        help='An optional path to a TensorFlow checkpoint path to be converted.',
    )
    parser.add_argument(
        '--transfo_xl_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained BERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--transfo_xl_dataset_file',
        default='',
        type=str,
        help='An optional dataset file to be converted in a vocabulary.',
    )
    args = parser.parse_args()
    __UpperCAmelCase(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 122 | 0 |
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__snake_case :Any ={
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class lowerCAmelCase__:
    """Diffie-Hellman key exchange over a fixed RFC 3526 MODP group.

    NOTE(review): the obfuscated original assigned the group parameters to a
    throwaway local, gave several methods/parameters colliding names (a
    SyntaxError), and referenced ``DiffieHellman``/``self.is_valid_public_key``,
    which did not exist. This restores the attribute writes and the distinct
    names the method bodies already rely on. ``shaaaa`` is the file's (mangled)
    hashlib import — presumably ``sha256``; confirm the import line.
    """

    def __init__(self, group: int = 14) -> None:
        """Select a MODP group from ``primes`` and draw a fresh 256-bit private key."""
        if group not in primes:
            raise ValueError('Unsupported Group')
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        """Hex private key without the ``0x`` prefix."""
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        """Hex public key ``g^x mod p`` without the ``0x`` prefix."""
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return 2 <= key <= self.prime - 2 and pow(key, (self.prime - 1) // 2, self.prime) == 1

    def generate_shared_key(self, other_key_str: str) -> str:
        """Digest of the shared secret derived from a peer's hex public key."""
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('Invalid public key')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return shaaaa(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return 2 <= remote_public_key <= prime - 2 and pow(remote_public_key, (prime - 1) // 2, prime) == 1

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        """Instance-free variant: derive the shared digest from hex key strings."""
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['prime']
        if not lowerCAmelCase__.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('Invalid public key')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return shaaaa(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
_lowercase = 100
_lowercase = set(range(3, NUM_PRIMES, 2))
primes.add(2)
_lowercase = 42
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def __UpperCamelCase ( a : int ) ->set[int]:
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
snake_case = set()
snake_case = 42
snake_case = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def __UpperCamelCase ( a : int = 5000 ) ->int | None:
for number_to_partition in range(1 , a ):
if len(partition(a ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
    # NOTE(review): the obfuscation dropped the ``solution`` binding; call the
    # module-level name the last definition actually receives.
    print(f"{__UpperCamelCase() = }")
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_ad import DualTransformeraDModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .ta_film_transformer import TaFilmDecoder
    from .transformer_ad import TransformeraDModel
    # NOTE(review): the obfuscation collapsed what were presumably distinct
    # 1D/2D UNet imports into byte-identical duplicate lines; the repeats were
    # no-ops at runtime and are dropped here.
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 708 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCamelCase__():
    """Download the Merlion demo image used to sanity-check the conversion.

    The obfuscated original passed the function object itself as the URL and
    ``stream`` argument; restore the literal URL and ``stream=True``.
    """
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image


# The conversion step calls this helper by its original, un-obfuscated name.
load_demo_image = UpperCamelCase__
def UpperCamelCase__(config):
    """Build the (old_name, new_name) key-rename table for a BLIP-2 checkpoint.

    The obfuscated original named its only parameter after itself while the
    body read ``config`` (a NameError at runtime); the parameter is restored.
    """
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding'))
    rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding'))
    rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight'))
    rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias'))
    rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight'))
    rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias'))
    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))
    # QFormer
    rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight'))
    rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias'))
    # fmt: on
    return rename_keys


# The conversion step calls this helper by its original name.
create_rename_keys = UpperCamelCase__
def UpperCamelCase__(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    The obfuscated original gave all three parameters one name (a SyntaxError)
    and discarded the write-back; the body's reads of ``dct``/``val`` ground
    this reconstruction.
    """
    val = dct.pop(old)
    dct[new] = val


# The conversion step calls this helper by its original name.
rename_key = UpperCamelCase__
def UpperCamelCase__(state_dict, config):
    """Fold separate q/v bias vectors into a single qkv bias per vision layer.

    The obfuscated original gave both parameters one name (a SyntaxError) and
    dropped the state-dict write-back; the in-code comment ("set bias in the
    state dict") and the attribute reads ground this reconstruction.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict (k has no bias, hence the zeros block)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


# The conversion step calls this helper by its original name.
read_in_q_v_bias = UpperCamelCase__
def UpperCamelCase__(model_name, eos_token_id):
    """Build a (BlipaConfig, image_size) pair for the named checkpoint.

    COCO checkpoints use a 364px canvas, all others 224px. The obfuscated
    original gave both parameters one name (a SyntaxError); ``model_name`` is
    read by the body and ``eos_token_id`` is the keyword the caller passes.
    """
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size


# The conversion step calls this helper by its original name.
get_blipa_config = UpperCamelCase__
@torch.no_grad()
def UpperCamelCase__( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Union[str, Any]=False )->Dict:
    """Convert a LAVIS BLIP-2 checkpoint to the HF Blip2 format and sanity-check it.

    NOTE(review): the three parameters share one obfuscated name (a
    SyntaxError); the body reads ``model_name``, ``pytorch_dump_folder_path``
    and ``push_to_hub`` — presumably the intended parameter names, matching the
    CLI below. Likewise, almost every local binding was collapsed to ``A__``
    and many argument references to ``UpperCamelCase__``, so the names read
    later (``tokenizer``, ``state_dict``, ``hf_model``, ...) no longer resolve;
    treat the assignments below as lost bindings to those names.
    """
    # Tokenizer choice depends on whether the checkpoint has an OPT or T5 head.
    A__ = (
        AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
        if '''opt''' in model_name
        else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
    )
    A__ = tokenizer('''\n''' , add_special_tokens=UpperCamelCase__ ).input_ids[0]
    A__ , A__ = get_blipa_config(UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
    A__ = BlipaForConditionalGeneration(UpperCamelCase__ ).eval()
    # Map from HF model name to the (LAVIS model name, model type) pair.
    A__ = {
        '''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
        '''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
        '''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
        '''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
        '''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
        '''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
        '''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
    }
    A__ , A__ = model_name_to_original[model_name]
    # load original model
    print('''Loading original model...''' )
    A__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    A__ , A__ , A__ = load_model_and_preprocess(
        name=UpperCamelCase__ , model_type=UpperCamelCase__ , is_eval=UpperCamelCase__ , device=UpperCamelCase__ )
    original_model.eval()
    print('''Done!''' )
    # update state dict keys
    A__ = original_model.state_dict()
    A__ = create_rename_keys(UpperCamelCase__ )
    for src, dest in rename_keys:
        rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        A__ = state_dict.pop(UpperCamelCase__ )
        if key.startswith('''Qformer.bert''' ):
            A__ = key.replace('''Qformer.bert''' , '''qformer''' )
        if "attention.self" in key:
            A__ = key.replace('''self''' , '''attention''' )
        if "opt_proj" in key:
            A__ = key.replace('''opt_proj''' , '''language_projection''' )
        if "t5_proj" in key:
            A__ = key.replace('''t5_proj''' , '''language_projection''' )
        if key.startswith('''opt''' ):
            A__ = key.replace('''opt''' , '''language''' )
        if key.startswith('''t5''' ):
            A__ = key.replace('''t5''' , '''language''' )
        A__ = val
    # read in qv biases
    read_in_q_v_bias(UpperCamelCase__ , UpperCamelCase__ )
    A__ , A__ = hf_model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
    assert len(UpperCamelCase__ ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    A__ = load_demo_image()
    A__ = vis_processors['''eval'''](UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
    A__ = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(UpperCamelCase__ )
    # create processor
    A__ = BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size} , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ )
    A__ = BlipaProcessor(image_processor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
    A__ = processor(images=UpperCamelCase__ , return_tensors='''pt''' ).pixel_values.to(UpperCamelCase__ )
    # make sure processor creates exact same pixel values
    assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ )
    original_model.to(UpperCamelCase__ )
    hf_model.to(UpperCamelCase__ )
    with torch.no_grad():
        if "opt" in model_name:
            A__ = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
            A__ = hf_model(UpperCamelCase__ , UpperCamelCase__ ).logits
        else:
            A__ = original_model(
                {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
            A__ = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
            A__ = hf_model(UpperCamelCase__ , UpperCamelCase__ , labels=UpperCamelCase__ ).logits
    assert original_logits.shape == logits.shape
    print('''First values of original logits:''' , original_logits[0, :3, :3] )
    print('''First values of HF logits:''' , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        A__ = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=UpperCamelCase__ )
        assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        A__ = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=UpperCamelCase__ )
    else:
        # cast to same type
        A__ = logits.dtype
        assert torch.allclose(original_logits.to(UpperCamelCase__ ) , UpperCamelCase__ , atol=1e-2 )
    print('''Looks ok!''' )
    print('''Generating a caption...''' )
    A__ = ''''''
    A__ = tokenizer(UpperCamelCase__ , return_tensors='''pt''' ).input_ids.to(UpperCamelCase__ )
    A__ = original_model.generate({'''image''': original_pixel_values} )
    A__ = hf_model.generate(
        UpperCamelCase__ , UpperCamelCase__ , do_sample=UpperCamelCase__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print('''Original generation:''' , UpperCamelCase__ )
    A__ = input_ids.shape[1]
    A__ = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=UpperCamelCase__ )
    A__ = [text.strip() for text in output_text]
    print('''HF generation:''' , UpperCamelCase__ )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(UpperCamelCase__ )
        hf_model.save_pretrained(UpperCamelCase__ )
    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}" )
        hf_model.push_to_hub(f"nielsr/{model_name}" )
if __name__ == "__main__":
    # The obfuscation dropped the ``parser``/``choices``/``args`` bindings and
    # called an undefined ``convert_blipa_checkpoint`` (NameErrors); restore
    # the locals and call the conversion entry point by its module-level name.
    parser = argparse.ArgumentParser()
    choices = [
        'blip2-opt-2.7b',
        'blip2-opt-6.7b',
        'blip2-opt-2.7b-coco',
        'blip2-opt-6.7b-coco',
        'blip2-flan-t5-xl',
        'blip2-flan-t5-xl-coco',
        'blip2-flan-t5-xxl',
    ]
    parser.add_argument(
        '--model_name',
        default='blip2-opt-2.7b',
        choices=choices,
        type=str,
        help='Path to hf config.json of model to convert',
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model and processor to the hub after converting',
    )
    args = parser.parse_args()
    UpperCamelCase__(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 212 | 0 |
class _UpperCAmelCase :
    '''simple docstring'''
    # NOTE(review): throughout this class the obfuscation rewrote attribute
    # writes (``self.xxx = ...``) into assignments to a throwaway local ``A``
    # and erased parameter names, so most state mutations below are silently
    # lost and many names (``vertex``, ``head``, ``tail``, ``weight``,
    # ``edges``, ``g``, ``Graph``) are unresolved. The attribute *reads*
    # (``self.adjacency``, ``self.num_vertices``) indicate the intended
    # targets — confirm against the un-mangled source before relying on this.
    def __init__( self : List[str] ):
        # presumably: self.num_vertices = 0; self.num_edges = 0; self.adjacency = {}
        A = 0
        A = 0
        A = {}
    def UpperCamelCase ( self : Any , UpperCamelCase__ : List[Any] ):
        # Register a vertex if absent. ``vertex`` (the lost parameter name) is
        # undefined here; presumably the body should read the parameter and
        # perform ``self.adjacency[vertex] = {}``.
        if vertex not in self.adjacency:
            A = {}
            self.num_vertices += 1
    def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : int ):
        # Add an undirected weighted edge (skipping self-loops); the two weight
        # writes presumably targeted ``self.adjacency[head][tail]`` and
        # ``self.adjacency[tail][head]``. ``self.add_vertex`` does not exist —
        # every method was renamed to ``UpperCamelCase``.
        self.add_vertex(UpperCamelCase__ )
        self.add_vertex(UpperCamelCase__ )
        if head == tail:
            return
        A = weight
        A = weight
    def UpperCamelCase ( self : Optional[Any] ):
        # Presumably makes all edge weights pairwise distinct by bumping ties;
        # ``self.get_edges`` and the loop variables are unresolved (see NOTE).
        A = self.get_edges()
        for edge in edges:
            A , A , A = edge
            edges.remove((tail, head, weight) )
        for i in range(len(UpperCamelCase__ ) ):
            A = list(edges[i] )
        edges.sort(key=lambda UpperCamelCase__ : e[2] )
        for i in range(len(UpperCamelCase__ ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                A = edges[i][2] + 1
        for edge in edges:
            A , A , A = edge
            A = weight
            A = weight
    def __str__( self : Dict ):
        # Render every directed edge as "head -> tail == weight", one per line;
        # the accumulator binding (``string``) was lost to obfuscation.
        A = ''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                A = self.adjacency[head][tail]
                string += f'''{head} -> {tail} == {weight}\n'''
        return string.rstrip('\n' )
    def UpperCamelCase ( self : List[Any] ):
        # Collect (tail, head, weight) triples; the ``output`` binding was lost.
        A = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def UpperCamelCase ( self : Union[str, Any] ):
        # View of all registered vertices.
        return self.adjacency.keys()
    @staticmethod
    def UpperCamelCase ( UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=None ):
        # NOTE(review): duplicate parameter names make this ``def`` a
        # SyntaxError; presumably a ``build(vertices, edges)`` factory that
        # constructed this class (referenced below as the lost name ``Graph``).
        A = Graph()
        if vertices is None:
            A = []
        if edges is None:
            A = []
        for vertex in vertices:
            g.add_vertex(UpperCamelCase__ )
        for edge in edges:
            g.add_edge(*UpperCamelCase__ )
        return g
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ):
A = {}
A = {}
def __len__( self : int ):
return len(self.parent )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Optional[Any] ):
if item in self.parent:
return self.find(UpperCamelCase__ )
A = item
A = 0
return item
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[Any] ):
if item not in self.parent:
return self.make_set(UpperCamelCase__ )
if item != self.parent[item]:
A = self.find(self.parent[item] )
return self.parent[item]
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ):
A = self.find(UpperCamelCase__ )
A = self.find(UpperCamelCase__ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
A = roota
return roota
if self.rank[roota] < self.rank[roota]:
A = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
A = roota
return roota
return None
@staticmethod
def UpperCamelCase ( UpperCamelCase__ : str ):
A = graph.num_vertices
A = Graph.UnionFind()
A = []
while num_components > 1:
A = {}
for vertex in graph.get_vertices():
A = -1
A = graph.get_edges()
for edge in edges:
A , A , A = edge
edges.remove((tail, head, weight) )
for edge in edges:
A , A , A = edge
A = union_find.find(UpperCamelCase__ )
A = union_find.find(UpperCamelCase__ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
A , A , A = cheap_edge[vertex]
if union_find.find(UpperCamelCase__ ) != union_find.find(UpperCamelCase__ ):
union_find.union(UpperCamelCase__ , UpperCamelCase__ )
mst_edges.append(cheap_edge[vertex] )
A = num_components - 1
A = Graph.build(edges=UpperCamelCase__ )
return mst
| 699 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure consumed by _LazyModule: submodule name -> public names.
# Optional backends are only registered when their dependency is importable.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Direct imports for type checkers only; at runtime the lazy module below
    # resolves attributes on demand.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy so heavy backends are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 699 | 1 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    """Return the scalar sum of every tensor in *state_dict*, used as a
    checksum when comparing the original and converted FLAVA weights.

    Entries whose key contains ``encoder.embeddings`` are excluded.
    NOTE(review): presumably because embeddings are counted differently in the
    two formats — confirm against the original conversion script.
    """
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    """Map original FLAVA checkpoint keys to HF ``FlavaForPreTraining`` keys.

    Embedding entries of the uni-modal encoders are skipped; every remaining
    tensor is cast to float32. Codebook (DALL-E dVAE) weights are namespaced
    under ``image_codebook.``.
    """
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        # Rename heads and encoders to their HF counterparts. The replacements
        # must be chained so every pattern is applied to the same key.
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """Convert an original FLAVA checkpoint to the HF format and save it.

    Args:
        checkpoint_path: local path or URL of the original FLAVA checkpoint.
        codebook_path: path of the DALL-E codebook checkpoint.
        pytorch_dump_folder_path: directory to save the converted model to.
        config_path: optional path to a ``FlavaConfig`` JSON; the stock
            configuration is used when omitted.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        # Not a local file: treat it as a URL.
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    # Sanity check: the converted model must carry the same total weight mass.
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point for the FLAVA checkpoint conversion above.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 721 |
"""simple docstring"""
import os
def solution():
    """Return the first ten digits of the sum of the numbers (one per line)
    stored in ``num.txt`` next to this script (Project Euler problem 13)."""
    # Resolve num.txt relative to this file, not the current working directory.
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
| 118 | 0 |
class Node:
    """A named value used as a Min-Heap element."""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        # Heap ordering is by value only.
        return self.val < other.val


class MinHeap:
    """Array-backed Min-Heap of ``Node`` objects.

    ``idx_of_element`` maps each node to its position in ``self.heap`` so that
    ``decrease_key`` can locate a node in O(1); ``heap_dict`` maps node names
    to their current values for ``__getitem__`` lookups.
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        """Return the current value of the node named *key*."""
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify *array* in place (bottom-up) and register the bookkeeping maps."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        # sift down every internal node, starting from the last parent
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Move array[idx] down until the min-heap property holds."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Move self.heap[idx] up until the min-heap property holds."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        """Return (without removing) the minimum node."""
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum node."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        """Add *node* to the heap."""
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        """Lower *node*'s value to *new_value* and restore the heap property."""
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
# Demo: build a Min-Heap over named nodes and decrease one key.
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import structure consumed by _LazyModule: submodule name -> public names.
# Optional backends are only registered when their dependency is importable.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Direct imports for type checkers only; at runtime the lazy module below
    # resolves attributes on demand.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy so heavy backends are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure consumed by _LazyModule: submodule name -> public names.
# Optional backends are only registered when their dependency is importable.
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Direct imports for type checkers only; at runtime the lazy module below
    # resolves attributes on demand.
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy so heavy backends are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImgaImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for the IF image-to-image super-resolution pipeline."""

    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # Shared with the other IF super-resolution test suites.
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 259 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard encoder/decoder input dict for Blenderbot tests.

    Missing attention masks are derived from ``config.pad_token_id``; missing
    head masks are filled with ones.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): the encoder attention mask is reused here, matching the
        # original source — confirm upstream before "fixing" this to
        # decoder_attention_mask.
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    """Builds tiny Blenderbot configs/inputs and checks cached decoding."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Return a tiny config plus matching encoder/decoder inputs."""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        # force every sequence to end with the EOS token (id 2)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Decoding step-by-step with a cache must match one-shot decoding."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above, but with an explicit (padded) decoder attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # pad the mask out to the maximum cached decoder length
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """Direct forward-pass tests for the Flax Blenderbot LM head."""

    vocab_size = 99

    def _get_config_and_data(self):
        """Return a tiny config plus a batch of EOS-terminated input ids."""
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        # Encoder and decoder sequence lengths deliberately differ.
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        # shifting consumes exactly one pad token per batch row set
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class lowerCamelCase_ ( __a , unittest.TestCase , __a ):
    # Flax Blenderbot model-test suite.
    # NOTE(review): the two ``__a`` bases and the ``_A`` call arguments are
    # undefined placeholders left by generation — upstream these are the Flax
    # model/generation tester mixins and per-line locals; confirm against
    # upstream test_modeling_flax_blenderbot.py before relying on this file.
    lowerCAmelCase__ = True  # presumably is_encoder_decoder — TODO confirm
    lowerCAmelCase__ = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        # setUp: tester that builds configs/inputs for the checks below.
        UpperCAmelCase__ : Dict = FlaxBlenderbotModelTester(self )
    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        # Cached vs. uncached decoding must agree for every model class.
        UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(_A , _A , _A )
    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        # Same check, but with an explicit decoder attention mask.
        UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A )
    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        # model.encode must yield identical output shapes with and without jit.
        UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A )
                UpperCAmelCase__ : str = model_class(_A )
                @jax.jit
                def encode_jitted(_A : Any , _A : Tuple=None , **_A : Optional[int] ):
                    return model.encode(input_ids=_A , attention_mask=_A )
                with self.subTest('''JIT Enabled''' ):
                    UpperCAmelCase__ : Optional[Any] = encode_jitted(**_A ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        UpperCAmelCase__ : Tuple = encode_jitted(**_A ).to_tuple()
                self.assertEqual(len(_A ) , len(_A ) )
                for jitted_output, output in zip(_A , _A ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        # model.decode must yield identical output shapes with and without jit.
        UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase__ : List[str] = model_class(_A )
                UpperCAmelCase__ : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
                UpperCAmelCase__ : Tuple = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(_A : Optional[int] , _A : List[Any] , _A : int ):
                    return model.decode(
                        decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , )
                with self.subTest('''JIT Enabled''' ):
                    UpperCAmelCase__ : Any = decode_jitted(**_A ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        UpperCAmelCase__ : Optional[int] = decode_jitted(**_A ).to_tuple()
                self.assertEqual(len(_A ) , len(_A ) )
                for jitted_output, output in zip(_A , _A ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        # Every model class must load the public 400M checkpoint and run a
        # forward pass on a single eos token.
        for model_class_name in self.all_model_classes:
            UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            UpperCAmelCase__ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id
            UpperCAmelCase__ : Union[str, Any] = model(_A )
            self.assertIsNotNone(_A )
    @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
    @slow
    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        # End-to-end greedy generation with the 3B checkpoint (non-CPU only).
        UpperCAmelCase__ : Union[str, Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
        UpperCAmelCase__ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
        UpperCAmelCase__ : str = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_A )
        UpperCAmelCase__ : Optional[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
        UpperCAmelCase__ : Optional[Any] = ['''Sam''']
        UpperCAmelCase__ : Dict = tokenizer(_A , return_tensors='''jax''' )
        UpperCAmelCase__ : List[str] = model.generate(**_A , **_A )
        UpperCAmelCase__ : Dict = '''Sam is a great name. It means "sun" in Gaelic.'''
        UpperCAmelCase__ : Any = tokenizer.batch_decode(_A , **_A )
        assert generated_txt[0].strip() == tgt_text
| 75 | '''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """Builds small TimmBackbone configs and inputs for the shared model tests.

    Renamed from the obfuscated ``A``: the test case below instantiates it as
    ``TimmBackboneModelTester(self)``.  The generated ``__init__`` also
    repeated the placeholder name ``__a`` for every parameter (a SyntaxError),
    and the methods were all defined under one name while being called as
    ``prepare_config_and_inputs`` etc.; real names are restored here.
    """

    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        # Default to the last stage when the caller does not pin the indices.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return a (config, pixel_values) pair with a random image batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Build a TimmBackboneConfig from this tester's attributes."""
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        """Run a no-grad forward pass and check the last feature map's shape."""
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) shape the mixins expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    # TimmBackbone test suite (this class continues past the end of this chunk).
    # NOTE(review): the three ``UpperCAmelCase`` bases are undefined
    # placeholders — upstream these are the model/backbone/pipeline tester
    # mixins; likewise all six class attributes below were collapsed onto the
    # single name ``a_``, so at runtime only the last assignment survives.
    a_ = (TimmBackbone,) if is_torch_available() else ()
    a_ = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
    a_ = False
    a_ = False
    a_ = False
    a_ = False
    def snake_case__ ( self : Optional[int] ) -> List[str]:
        # setUp: build the model tester and a ConfigTester without text modality.
        __UpperCAmelCase = TimmBackboneModelTester(self )
        __UpperCAmelCase = ConfigTester(self , config_class=__a , has_text_modality=__a )
    def snake_case__ ( self : List[str] ) -> int:
        # Standard config round-trip / init checks.
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def snake_case__ ( self : List[str] ) -> str:
        # Compare a timm-loaded backbone against the transformers port: equal
        # feature/stage counts, equal channels, matching out_indices semantics.
        __UpperCAmelCase = '''resnet18'''
        __UpperCAmelCase = '''microsoft/resnet-18'''
        __UpperCAmelCase = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a )
        __UpperCAmelCase = AutoBackbone.from_pretrained(__a )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,) )
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        __UpperCAmelCase = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] )
        __UpperCAmelCase = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] )
        self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
    # The stubs below disable common-mixin tests that do not apply to
    # TimmBackbone.  NOTE(review): every method shares the generated name
    # ``snake_case__``, so at runtime only the final definition survives.
    @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
    def snake_case__ ( self : str ) -> Optional[Any]:
        pass
    @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
    def snake_case__ ( self : str ) -> List[Any]:
        pass
    @unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
    def snake_case__ ( self : str ) -> int:
        pass
    @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
    def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
        pass
    @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
    def snake_case__ ( self : Optional[int] ) -> Dict:
        pass
    @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
    def snake_case__ ( self : str ) -> Dict:
        pass
    @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
    def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
        pass
    @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
    def snake_case__ ( self : Union[str, Any] ) -> Dict:
        pass
    @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
    def snake_case__ ( self : List[str] ) -> Union[str, Any]:
        pass
    @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
    def snake_case__ ( self : str ) -> List[Any]:
        pass
    @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
    def snake_case__ ( self : Tuple ) -> int:
        pass
    @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
    def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
        pass
    @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
    def snake_case__ ( self : List[str] ) -> Dict:
        pass
    @unittest.skip('''Safetensors is not supported by timm.''' )
    def snake_case__ ( self : int ) -> int:
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def snake_case__ ( self : int ) -> int:
        pass
    def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
        # forward() must take pixel_values as its first (non-self) argument.
        __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCAmelCase = model_class(__a )
            __UpperCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCAmelCase = [*signature.parameters.keys()]
            __UpperCAmelCase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __a )
    def snake_case__ ( self : Any ) -> int:
        # Gradient-retention check: hidden states (and attentions when present)
        # must receive gradients after backprop from the last output.
        __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCAmelCase = True
        __UpperCAmelCase = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        __UpperCAmelCase = self.all_model_classes[0]
        __UpperCAmelCase = model_class(__a )
        model.to(__a )
        __UpperCAmelCase = self._prepare_for_class(__a , __a )
        __UpperCAmelCase = model(**__a )
        __UpperCAmelCase = outputs[0][-1]
        # Encoder-/Decoder-only models
        __UpperCAmelCase = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            __UpperCAmelCase = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=__a )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
def snake_case__ ( self : Any ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(**__a )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__UpperCAmelCase = copy.deepcopy(__a )
__UpperCAmelCase = None
__UpperCAmelCase = model_class(__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(**__a )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__UpperCAmelCase = copy.deepcopy(__a )
__UpperCAmelCase = False
__UpperCAmelCase = model_class(__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(**__a )
| 262 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    """A minimal FIFO queue over a list, with head/tail cursors.

    Renamed from the obfuscated ``a``: AVLtree.__str__ below instantiates it
    as ``MyQueue()`` and calls ``is_empty``/``push``/``pop`` by name (the
    generated source gave every method the same placeholder name, so only the
    last one survived).  Popped items are not removed from the backing list;
    ``head`` merely advances past them.
    """

    def __init__(self):
        self.data = []  # backing storage; never shrinks
        self.head = 0   # index of the next item to pop
        self.tail = 0   # index one past the last pushed item

    def is_empty(self):
        """True when no un-popped items remain."""
        return self.head == self.tail

    def push(self, data):
        """Append *data* at the tail."""
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self):
        """Return the item at the head and advance the head cursor."""
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self):
        """Number of items currently queued."""
        return self.tail - self.head

    def print_queue(self):
        """Debug helper: dump the full backing list and the live slice."""
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    """A binary-tree node carrying a datum, two children, and a cached height.

    Renamed from the obfuscated ``a``: the module-level AVL helpers call
    ``get_data``/``get_left``/``set_height``/... by these names.
    """

    def __init__(self, data):
        self.data = data
        self.left = None    # left child or None
        self.right = None   # right child or None
        self.height = 1     # height of the subtree rooted here; a leaf is 1

    def get_data(self):
        return self.data

    def get_left(self):
        return self.left

    def get_right(self):
        return self.right

    def get_height(self):
        return self.height

    def set_data(self, data):
        self.data = data

    def set_left(self, node):
        self.left = node

    def set_right(self, node):
        self.right = node

    def set_height(self, height):
        self.height = height
def get_height(node):
    """Height of the subtree rooted at *node* (a MyNode or None); empty is 0.

    Renamed from ``_snake_case`` and the parameter from the placeholder
    ``UpperCAmelCase_``: the body already read ``node`` (a NameError as
    written) and every caller in this module uses ``get_height``.
    """
    if node is None:
        return 0
    return node.get_height()
def my_max(a, b):
    """Return the larger of *a* and *b* (ties return *b*).

    Renamed from ``_snake_case``; the generated signature also declared the
    same placeholder parameter name twice (a SyntaxError) while the body read
    ``a`` and ``b``.
    """
    if a > b:
        return a
    return b
def right_rotation(node):
    """Rotate *node* right: promote its left child and recompute both heights.

    Named per its callers (the left-left imbalance in insert_node/del_node
    uses ``right_rotation``).  The print text is kept exactly as in the
    source.  Returns the promoted child, the subtree's new root.
    """
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    # The demoted node's height must be fixed before the new root's.
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def left_rotation(node):
    """Rotate *node* left: promote its right child and recompute both heights.

    Named per its callers (the right-right imbalance in insert_node/del_node
    uses ``left_rotation``).  The print text is kept exactly as in the
    source.  Returns the promoted child, the subtree's new root.
    """
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node):
    """Left-right double rotation: left-rotate the left child, then right-rotate *node*."""
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)
def rl_rotation(node):
    """Right-left double rotation: right-rotate the right child, then left-rotate *node*."""
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node, data):
    """Insert *data* into the AVL subtree rooted at *node*; return the new root.

    Renamed from ``_snake_case`` (its recursive calls and AVLtree.insert use
    ``insert_node``); the lost local bindings (``left_child``/``right_child``,
    the rebound ``node``, the recomputed height) are restored.  Rebalances
    with single/double rotations when a child height difference reaches 2.
    """
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root):
    """Return the maximum datum in the subtree rooted at *root* (follow right children)."""
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most(root):
    """Return the minimum datum in the subtree rooted at *root* (follow left children)."""
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root, data):
    """Delete *data* from the AVL subtree rooted at *root*; return the new root (or None).

    Renamed from ``_snake_case`` (its recursive calls and AVLtree.del_node use
    ``del_node``); the lost argument bindings are restored from the structure
    of the surrounding calls.
    """
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            # Two children: replace the datum with the in-order successor and
            # delete that successor from the right subtree.
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    # Rebalance using the child references captured at entry.
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    h = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(h)
    return root
class AVLtree:
    """An AVL tree facade over the module-level insert/delete helpers.

    Renamed from the obfuscated ``a``: the demo block below builds it as
    ``AVLtree()`` and calls ``insert``/``del_node``/``str()``.  The generated
    methods also discarded the results of ``insert_node``/``del_node`` instead
    of rebinding ``self.root``, so the tree never changed; that binding is
    restored here.
    """

    def __init__(self):
        self.root = None

    def get_height(self):
        """Height of the whole tree (0 when empty)."""
        return get_height(self.root)

    def insert(self, data):
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data):
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self):  # a level traversale, gives a more intuitive look on the tree
        """Render the tree level by level, using '*' for missing children."""
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                # Placeholder child: keep the level geometry by enqueuing two
                # phantom children.
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                # A complete level has 2**layer - 1 nodes printed so far.
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _snake_case ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
    _test()
    # Insert a shuffled permutation of 0..9, print the tree, then delete every
    # key again.  The generated source collapsed the ``t``/``lst`` bindings the
    # loops below read; they are restored here.
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
| 720 |
"""simple docstring"""
from collections import defaultdict
def dfs(start):
    """Count the nodes reachable from *start*, recording even-sized subtrees.

    Renamed from ``_snake_case`` (the recursive call and even_tree already use
    ``dfs``); the generated source also collapsed both the ``ret`` and
    ``visited[start]`` targets onto one placeholder, so ``ret`` was never
    bound.  Reads/writes the module-level ``tree``, ``visited`` and ``cuts``.
    """
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        # An even-sized subtree hangs below *start*: the edge above it is cuttable.
        cuts.append(start)
    return ret
def even_tree():
    """Run dfs from the root (node 1) so even-sized subtrees get recorded in ``cuts``."""
    dfs(1)
if __name__ == "__main__":
    # 10-node tree from HackerRank's "Even Tree": count the edges that can be
    # removed so every remaining component has an even node count.  The
    # generated source collapsed all the names dfs() reads (tree/visited/cuts)
    # onto one placeholder; they are restored here.
    n_nodes, n_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # The root's own even component is counted too, hence the -1.
    print(len(cuts) - 1)
| 500 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
# Model / scheduler configurations for the checkpoint families this script can
# convert.  The generated source collapsed all six assignments onto a single
# name; the distinct identifiers the __main__ block selects from are restored.

# Tiny UNet used by the "test" checkpoints.
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Class-conditional ImageNet-64 UNet.
IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Unconditional LSUN-256 (bedroom / cat) UNet.
LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# CMStochasticIterativeScheduler settings per checkpoint family.
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def strabool(v):
    """argparse-friendly bool parser: pass booleans through, map yes/no strings.

    Renamed from the obfuscated ``snake_case_``: the __main__ block calls
    ``strabool(args.class_cond)``.  Raises argparse.ArgumentTypeError for any
    unrecognised value.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Copy one ResNet block's weights from *checkpoint* into *new_checkpoint*.

    Renamed from ``snake_case_`` (con_pt_to_diffuser calls ``convert_resnet``);
    the generated signature also repeated one placeholder parameter name five
    times (a SyntaxError) and dropped every assignment target, so the
    destination keys are restored from the diffusers ResnetBlock2D layout.

    has_skip: also copy the 1x1 skip connection as ``conv_shortcut``.
    Returns *new_checkpoint* (mutated in place).
    """
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    """Copy one attention block, splitting the fused qkv projection into to_q/to_k/to_v.

    Renamed from ``snake_case_`` (con_pt_to_diffuser calls
    ``convert_attention``).  The 1x1-conv weights are squeezed down to 2-D
    linear weights for diffusers' attention layout.  *attention_dim* is
    accepted for caller compatibility but unused.  Returns *new_checkpoint*.
    """
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    """Translate an OpenAI consistency-model UNet checkpoint into the diffusers UNet2DModel layout.

    Renamed from ``snake_case_`` (the __main__ block calls
    ``con_pt_to_diffuser``).  The generated source dropped every assignment
    target; the destination key names are restored from the diffusers UNet
    state-dict layout.  Returns the converted state dict.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    # Timestep embedding MLP.
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1  # input_blocks.0 is conv_in, handled above
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        # The first resnet of a block that changes width needs a skip conv.
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            # Every down block except the last ends in a resnet downsampler.
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                # The upsampler sits one slot after the block's last attention.
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    # Final norm + output convolution.
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    # argparse hands us a string; normalise it to a real bool.
    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config (selected by checkpoint file name).
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    # NOTE(review): this name is what the file imports; upstream diffusers
    # spells it UNet2DModel — confirm the import block.
    image_unet = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config (consistency distillation vs. consistency training).
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 83 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class A ( unittest.TestCase ):
    """Holds the configuration used to build an ImageGPT image-processor kwargs
    dict for the unit tests below.

    NOTE(review): the original ``__init__`` repeated one obfuscated parameter
    name nine times (a SyntaxError) and assigned throwaway locals instead of
    the ``self.*`` attributes read by ``A__``; parameter names and attribute
    assignments are restored here from those later reads.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ) -> None:
        # Default target size mirrors the processor's default 18x18 resize.
        self.size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize

    def A__ ( self ) -> dict:
        """Return the kwargs dict for constructing an ImageGPTImageProcessor."""
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
                    [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
                ] ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class A ( __UpperCAmelCase , unittest.TestCase ):
    """Unit tests for ImageGPTImageProcessor: attribute presence, from_dict,
    to_json_string / to_json_file round-trips and save/from_pretrained.

    NOTE(review): obfuscation residue — ``__UpperCAmelCase`` (presumably the
    ImageProcessingSavingTestMixin imported above) and
    ``ImageGPTImageProcessingTester`` are not defined under these names in this
    module; every method is named ``A__`` so later definitions shadow earlier
    ones; and most ``lowercase__ = ...`` statements lost their original
    assignment targets (``self.image_processor_tester``, ``image_processor``,
    ``obj``, ``image_processor_first`` ...), leaving the later reads undefined
    as written. Flagged for restoration; code left byte-identical.
    """

    # Processing class under test (None when the vision extras are missing).
    lowerCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None

    def A__ ( self ) -> List[str]:
        '''simple docstring'''
        # presumably: self.image_processor_tester = ImageGPTImageProcessingTester(self)
        lowercase__ = ImageGPTImageProcessingTester(self )

    @property
    def A__ ( self ) -> int:
        '''simple docstring'''
        # kwargs dict used to instantiate the processor in each test
        return self.image_processor_tester.prepare_image_processor_dict()

    def A__ ( self ) -> List[str]:
        '''simple docstring'''
        # the processor must expose its configuration attributes
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase__ , """clusters""" ) )
        self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
        self.assertTrue(hasattr(lowerCamelCase__ , """size""" ) )
        self.assertTrue(hasattr(lowerCamelCase__ , """do_normalize""" ) )

    def A__ ( self ) -> List[str]:
        '''simple docstring'''
        # from_dict honours both the stored size and an explicit override
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )

    def A__ ( self ) -> str:
        '''simple docstring'''
        # to_json_string must serialize the numpy cluster array losslessly
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        lowercase__ = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(lowerCamelCase__ , obj[key] ) )
            else:
                self.assertEqual(obj[key] , lowerCamelCase__ )

    def A__ ( self ) -> Dict:
        '''simple docstring'''
        # to_json_file / from_json_file round-trip through a temp directory
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase__ = os.path.join(lowerCamelCase__ , """image_processor.json""" )
            image_processor_first.to_json_file(lowerCamelCase__ )
            lowercase__ = self.image_processing_class.from_json_file(lowerCamelCase__ ).to_dict()
            lowercase__ = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(lowerCamelCase__ , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , lowerCamelCase__ )

    def A__ ( self ) -> List[str]:
        '''simple docstring'''
        # save_pretrained / from_pretrained round-trip
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(lowerCamelCase__ )
            lowercase__ = self.image_processing_class.from_pretrained(lowerCamelCase__ ).to_dict()
            lowercase__ = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(lowerCamelCase__ , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , lowerCamelCase__ )

    @unittest.skip("""ImageGPT requires clusters at initialization""" )
    def A__ ( self ) -> Optional[Any]:
        '''simple docstring'''
        pass
def _A ( ):
    """Load two fixture images from the shared HF test dataset and return them
    as a list of PIL images (used by the integration test below).

    NOTE(review): the original bound every result to a throwaway local and then
    returned undefined names (``dataset``, ``imagea``, ``images``); the
    assignment targets are restored here from those later reads.
    """
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
    image_a = Image.open(dataset[4]["""file"""] )
    image_b = Image.open(dataset[5]["""file"""] )
    images = [image_a, image_b]
    return images
@require_vision
@require_torch
class A ( unittest.TestCase ):
    """Slow integration test: the pretrained ImageGPT processor must produce
    the expected 1024 cluster ids per image, batched and unbatched.

    NOTE(review): obfuscation residue — the locals the assertions read
    (``image_processing``, ``images``, ``encoding``, the expected-id lists and
    ``lowerCamelCase__``) were replaced by throwaway ``lowercase__`` targets and
    are undefined as written; ``prepare_images`` also does not exist under that
    name here (the fixture loader above is ``_A``). Flagged for restoration.
    """

    @slow
    def A__ ( self ) -> str:
        '''simple docstring'''
        lowercase__ = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
        lowercase__ = prepare_images()
        # test non-batched
        lowercase__ = image_processing(images[0] , return_tensors="""pt""" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1_024) )
        # first three cluster ids of the first fixture image
        lowercase__ = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCamelCase__ )
        # test batched
        lowercase__ = image_processing(lowerCamelCase__ , return_tensors="""pt""" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1_024) )
        # last three cluster ids of the second fixture image
        lowercase__ = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCamelCase__ )
| 325 | 0 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __UpperCamelCase ( lowercase__ ):
    """Processor wrapping a SAM image processor: prepares images together with
    optional point / label / box prompts for the model.

    NOTE(review): this block is obfuscation residue and is not runnable as
    written — every local/attribute assignment target was replaced by
    ``snake_case_`` (so later references such as ``encoding_image_processor``,
    ``original_sizes``, ``input_points``, ``self.target_size`` and
    ``self.point_pad_value`` are undefined), several signatures repeat the
    placeholder ``_UpperCamelCase`` parameter name (a SyntaxError), and all
    private methods share the name ``a__`` so later definitions shadow earlier
    ones. Compare against the upstream ``SamProcessor`` before use.
    """

    # Attributes consumed by ProcessorMixin: wrapped components and their classes.
    lowercase : Dict = ['image_processor']
    lowercase : str = 'SamImageProcessor'

    def __init__( self :Optional[Any] ,_UpperCamelCase :Tuple ):
        # The three assignments below were presumably self.current_processor,
        # self.point_pad_value (-10) and self.target_size — TODO confirm.
        super().__init__(_UpperCamelCase )
        snake_case_ : Union[str, Any] = self.image_processor
        snake_case_ : Optional[Any] = -1_0
        snake_case_ : int = self.image_processor.size["""longest_edge"""]

    def __call__( self :Optional[int] ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :Optional[Union[str, TensorType]] = None ,**_UpperCamelCase :Any ,):
        # Run the image processor, then validate and normalize the point /
        # label / box prompts against each image's original size.
        snake_case_ : str = self.image_processor(
            _UpperCamelCase ,return_tensors=_UpperCamelCase ,**_UpperCamelCase ,)
        # pop arguments that are not used in the forward but used nevertheless
        snake_case_ : Any = encoding_image_processor["""original_sizes"""]
        if hasattr(_UpperCamelCase ,"""numpy""" ): # Checks if Torch or TF tensor
            snake_case_ : Dict = original_sizes.numpy()
        snake_case_ , snake_case_ , snake_case_ : Tuple = self._check_and_preprocess_points(
            input_points=_UpperCamelCase ,input_labels=_UpperCamelCase ,input_boxes=_UpperCamelCase ,)
        snake_case_ : str = self._normalize_and_convert(
            _UpperCamelCase ,_UpperCamelCase ,input_points=_UpperCamelCase ,input_labels=_UpperCamelCase ,input_boxes=_UpperCamelCase ,return_tensors=_UpperCamelCase ,)
        return encoding_image_processor

    def a__ ( self :Union[str, Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :str ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :str=None ,_UpperCamelCase :List[str]=None ,_UpperCamelCase :List[str]="pt" ,):
        # Normalize prompt coordinates to the processor's target size and
        # convert them to the requested tensor framework ("pt" or "tf").
        if input_points is not None:
            # A single original size is broadcast over all point groups.
            if len(_UpperCamelCase ) != len(_UpperCamelCase ):
                snake_case_ : List[str] = [
                    self._normalize_coordinates(self.target_size ,_UpperCamelCase ,original_sizes[0] ) for point in input_points
                ]
            else:
                snake_case_ : Union[str, Any] = [
                    self._normalize_coordinates(self.target_size ,_UpperCamelCase ,_UpperCamelCase )
                    for point, original_size in zip(_UpperCamelCase ,_UpperCamelCase )
                ]
            # check that all arrays have the same shape; pad ragged groups
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    snake_case_ , snake_case_ : List[str] = self._pad_points_and_labels(_UpperCamelCase ,_UpperCamelCase )
            snake_case_ : List[str] = np.array(_UpperCamelCase )
        if input_labels is not None:
            snake_case_ : Tuple = np.array(_UpperCamelCase )
        if input_boxes is not None:
            if len(_UpperCamelCase ) != len(_UpperCamelCase ):
                snake_case_ : Tuple = [
                    self._normalize_coordinates(self.target_size ,_UpperCamelCase ,original_sizes[0] ,is_bounding_box=_UpperCamelCase )
                    for box in input_boxes
                ]
            else:
                snake_case_ : str = [
                    self._normalize_coordinates(self.target_size ,_UpperCamelCase ,_UpperCamelCase ,is_bounding_box=_UpperCamelCase )
                    for box, original_size in zip(_UpperCamelCase ,_UpperCamelCase )
                ]
            snake_case_ : List[Any] = np.array(_UpperCamelCase )
        if input_boxes is not None:
            if return_tensors == "pt":
                snake_case_ : int = torch.from_numpy(_UpperCamelCase )
                # boxes batch size of 1 by default
                snake_case_ : Optional[int] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                snake_case_ : Tuple = tf.convert_to_tensor(_UpperCamelCase )
                # boxes batch size of 1 by default
                snake_case_ : Dict = tf.expand_dims(_UpperCamelCase ,1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"""input_boxes""": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                snake_case_ : Optional[Any] = torch.from_numpy(_UpperCamelCase )
                # point batch size of 1 by default
                snake_case_ : Dict = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                snake_case_ : Union[str, Any] = tf.convert_to_tensor(_UpperCamelCase )
                # point batch size of 1 by default
                snake_case_ : List[str] = tf.expand_dims(_UpperCamelCase ,1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"""input_points""": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                snake_case_ : Optional[int] = torch.from_numpy(_UpperCamelCase )
                # point batch size of 1 by default
                snake_case_ : int = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                snake_case_ : Dict = tf.convert_to_tensor(_UpperCamelCase )
                # point batch size of 1 by default
                snake_case_ : Tuple = tf.expand_dims(_UpperCamelCase ,1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"""input_labels""": input_labels} )
        return encoding_image_processor

    def a__ ( self :Tuple ,_UpperCamelCase :List[str] ,_UpperCamelCase :int ):
        # Pad ragged point groups (and their labels) up to the largest group
        # using the pad value, so the model can mask the padding out.
        snake_case_ : List[Any] = max([point.shape[0] for point in input_points] )
        snake_case_ : str = []
        for i, point in enumerate(_UpperCamelCase ):
            if point.shape[0] != expected_nb_points:
                snake_case_ : Optional[Any] = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] ,axis=0 )
                snake_case_ : Union[str, Any] = np.append(input_labels[i] ,[self.point_pad_value] )
            processed_input_points.append(_UpperCamelCase )
        snake_case_ : int = processed_input_points
        return input_points, input_labels

    def a__ ( self :Union[str, Any] ,_UpperCamelCase :int ,_UpperCamelCase :np.ndarray ,_UpperCamelCase :Tuple ,_UpperCamelCase :List[str]=False ):
        # Rescale (x, y) coordinates from the image's original size to the
        # processor's resized longest-edge geometry; boxes are reshaped to
        # (-1, 2, 2) corner pairs for the duration of the rescale.
        snake_case_ , snake_case_ : Any = original_size
        snake_case_ , snake_case_ : Optional[int] = self.image_processor._get_preprocess_shape(_UpperCamelCase ,longest_edge=_UpperCamelCase )
        snake_case_ : Any = deepcopy(_UpperCamelCase ).astype(_UpperCamelCase )
        if is_bounding_box:
            snake_case_ : Any = coords.reshape(-1 ,2 ,2 )
        snake_case_ : List[Any] = coords[..., 0] * (new_w / old_w)
        snake_case_ : List[Any] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            snake_case_ : Optional[Any] = coords.reshape(-1 ,4 )
        return coords

    def a__ ( self :Union[str, Any] ,_UpperCamelCase :int=None ,_UpperCamelCase :int=None ,_UpperCamelCase :List[Any]=None ,):
        # Validate raw prompt inputs: accept tensors (converted via .numpy())
        # or nested lists, and coerce the leaves to numpy arrays.
        if input_points is not None:
            if hasattr(_UpperCamelCase ,"""numpy""" ): # Checks for TF or Torch tensor
                snake_case_ : int = input_points.numpy().tolist()
            if not isinstance(_UpperCamelCase ,_UpperCamelCase ) or not isinstance(input_points[0] ,_UpperCamelCase ):
                raise ValueError("""Input points must be a list of list of floating points.""" )
            snake_case_ : Dict = [np.array(_UpperCamelCase ) for input_point in input_points]
        else:
            snake_case_ : Union[str, Any] = None
        if input_labels is not None:
            if hasattr(_UpperCamelCase ,"""numpy""" ):
                snake_case_ : Optional[Any] = input_labels.numpy().tolist()
            if not isinstance(_UpperCamelCase ,_UpperCamelCase ) or not isinstance(input_labels[0] ,_UpperCamelCase ):
                raise ValueError("""Input labels must be a list of list integers.""" )
            snake_case_ : Union[str, Any] = [np.array(_UpperCamelCase ) for label in input_labels]
        else:
            snake_case_ : List[Any] = None
        if input_boxes is not None:
            if hasattr(_UpperCamelCase ,"""numpy""" ):
                snake_case_ : int = input_boxes.numpy().tolist()
            if (
                not isinstance(_UpperCamelCase ,_UpperCamelCase )
                or not isinstance(input_boxes[0] ,_UpperCamelCase )
                or not isinstance(input_boxes[0][0] ,_UpperCamelCase )
            ):
                raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
            snake_case_ : str = [np.array(_UpperCamelCase ).astype(np.floataa ) for box in input_boxes]
        else:
            snake_case_ : Any = None
        return input_points, input_labels, input_boxes

    @property
    def a__ ( self :Tuple ):
        # Input names of the wrapped components, de-duplicated while
        # preserving order.
        snake_case_ : Union[str, Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(_UpperCamelCase ) )

    def a__ ( self :List[Any] ,*_UpperCamelCase :Any ,**_UpperCamelCase :Union[str, Any] ):
        # Thin delegation to the image processor's mask post-processing.
        return self.image_processor.post_process_masks(*_UpperCamelCase ,**_UpperCamelCase )
'''simple docstring'''
def UpperCAmelCase ( num : int ) -> bool:
    """Return True when the decimal digits of ``num`` read the same forwards
    and backwards.

    Negative numbers are never palindromes. ``0`` is a palindrome.

    NOTE(review): the original parameter carried an obfuscated placeholder name
    while the body read ``num`` (a NameError), and the accumulator targets
    ``num_copy``/``rev_num`` were lost; both are restored here.
    """
    if num < 0:
        return False
    num_copy : int = num
    rev_num : int = 0
    # Peel digits off `num` and push them onto `rev_num`, reversing the order.
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module when executed as a script.
    doctest.testmod()
# fmt: off
# Forward table: character -> Morse code (ITU-R M.1677-1 plus common extras);
# a space maps to "/" as the conventional word separator.
# NOTE(review): the original bound this dict only to the throwaway name
# `_lowercase`, yet the reverse-table comprehension and the codec functions
# below read `MORSE_CODE_DICT` / `REVERSE_DICT` (NameError at import); both
# names are bound here, and the `_lowercase` bindings are kept for
# compatibility (its final value remains the reverse table, as before).
MORSE_CODE_DICT = {
    """A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
    """H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
    """O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
    """V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
    """2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
    """8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
    """:""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
    """?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
    """(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
_lowercase = MORSE_CODE_DICT
# Reverse table: Morse code -> character (the mapping is injective, so this is lossless).
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
_lowercase = REVERSE_DICT
def _SCREAMING_SNAKE_CASE ( message ):
    """Encode ``message`` into Morse code, one space between symbols.

    Upper-cases the input first; raises KeyError for characters missing from
    the table. NOTE(review): the parameter was an obfuscated placeholder while
    the body read ``message`` (a NameError) — name restored. Requires the
    module-level ``MORSE_CODE_DICT`` table.
    """
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def _SCREAMING_SNAKE_CASE ( message ):
    """Decode a space-separated Morse ``message`` back into plain text.

    NOTE(review): the parameter was an obfuscated placeholder while the body
    read ``message`` (a NameError) — name restored. Requires the module-level
    ``REVERSE_DICT`` table.
    """
    return "".join(REVERSE_DICT[char] for char in message.split() )
def _SCREAMING_SNAKE_CASE ( ):
    """Demo driver: print a message, its Morse encoding, and the decoded text."""
    # NOTE(review): obfuscation residue — the result targets were replaced by
    # `lowerCamelCase_`, the printed name `snake_case__` is never bound, and
    # `encrypt`/`decrypt` do not exist under those names in this module (the
    # codec functions above share this function's own name). Undefined as
    # written; left byte-identical apart from comments.
    lowerCamelCase_ : List[Any] = """Morse code here!"""
    print(snake_case__ )
    lowerCamelCase_ : str = encrypt(snake_case__ )
    print(snake_case__ )
    lowerCamelCase_ : Any = decrypt(snake_case__ )
    print(snake_case__ )
if __name__ == "__main__":
    # NOTE(review): this module defines no function named `main`; the demo
    # driver above is the last definition bound to `_SCREAMING_SNAKE_CASE`, so
    # invoke it under that name to avoid a NameError when run as a script.
    _SCREAMING_SNAKE_CASE()
| 364 |
import colorsys
from PIL import Image # type: ignore
def UpperCamelCase ( x : float ,y : float ,max_step : int ) -> float:
    """Return the normalized escape step of the point (x, y) under the
    Mandelbrot iteration z -> z**2 + c.

    Iterates at most ``max_step`` times and returns ``step / (max_step - 1)``:
    1.0 means the point never diverged (treated as inside the set), values
    near 0 mean immediate divergence. ``max_step`` must be at least 2.

    NOTE(review): the original signature repeated one obfuscated placeholder
    for all three parameters (a SyntaxError); names restored from the body.
    """
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 2, i.e. |z|**2 > 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def UpperCamelCase ( distance : float ) -> tuple:
    """Black-and-white coloring for a normalized Mandelbrot distance: points
    inside the set (distance == 1) are black, everything else white.

    NOTE(review): the body read ``distance`` while the parameter carried an
    obfuscated placeholder name (a NameError); the parameter is restored.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def UpperCamelCase ( distance : float ) -> tuple:
    """Map a normalized Mandelbrot distance to an RGB color via the HSV wheel.

    A distance of exactly 1 (never diverged, i.e. inside the set) maps to
    black; any other value is used as the hue of a fully saturated color.

    NOTE(review): the body read ``distance`` while the parameter carried an
    obfuscated placeholder name (a NameError); the parameter is restored.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        # hue = distance, saturation = value = 1; scale [0, 1] floats to 0-255
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance ,1 ,1 ) )
def UpperCamelCase (
    image_width : int = 800,
    image_height : int = 600,
    figure_center_x : float = -0.6,
    figure_center_y : float = 0,
    figure_width : float = 3.2,
    max_step : int = 50,
    use_distance_color_coding : bool = True,
):
    """Render the Mandelbrot set to a new PIL RGB image and return it.

    The figure rectangle is centered on (figure_center_x, figure_center_y)
    with width ``figure_width``; its height follows the image aspect ratio.

    NOTE(review): the original signature repeated one obfuscated placeholder
    for every parameter (a SyntaxError) and the pixel write was lost to a
    throwaway local — both restored. The helpers ``get_distance``,
    ``get_color_coded_rgb`` and ``get_black_and_white_rgb`` are not defined
    under those names in this module (the functions above all share the name
    ``UpperCamelCase``); restore the intended helper names before running.
    """
    img = Image.new("""RGB""" ,(image_width, image_height) )
    pixels = img.load()
    # figure height follows the image aspect ratio (loop-invariant, hoisted)
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x ,figure_y ,max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `get_image` is not defined under that name in this module
    # (the renderer above is named `UpperCamelCase`), and the rendered image is
    # bound to `lowerCamelCase__` while `img.show()` below reads `img` —
    # undefined as written; flagged for restoration.
    # colored version, full figure
    lowerCamelCase__ = get_image()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    img.show()
| 455 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowerCAmelCase ( ):
    """Download the demo picture used for conversion sanity checks and return
    it as an RGB PIL image.

    NOTE(review): the original bound the URL and the image to throwaway locals
    and then returned the undefined name ``image``; the targets are restored.
    """
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    return image
def __lowerCAmelCase ( config ):
    """Build the (source_key, hf_key) rename pairs that map an original LAVIS
    InstructBLIP checkpoint onto the Hugging Face module layout.

    Order: 6 vision-embedding/post-layernorm entries, then 11 entries per
    vision encoder layer, then 2 Q-Former embedding entries.

    NOTE(review): the original parameter carried an obfuscated placeholder
    while the body read ``config``, and the accumulator ``rename_keys`` was
    bound to a throwaway name — both restored.
    """
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
    for i in range(config.vision_config.num_hidden_layers ):
        rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
        rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
        rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
        rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
        rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
        rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
        rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
        rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
        rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
        rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
        rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
    # fmt: on
    return rename_keys
def __lowerCAmelCase ( dct , old , new ):
    """Move ``dct[old]`` to ``dct[new]`` in place (used while renaming
    state-dict keys).

    NOTE(review): the original repeated one obfuscated placeholder for all
    three parameters (a SyntaxError) and dropped the popped value into a
    throwaway local instead of writing ``dct[new]``; both restored — the
    write-back is presumed from the upstream conversion script's
    ``rename_key`` helper and the way the renamed state dict is consumed
    below (TODO confirm).
    """
    val = dct.pop(old )
    dct[new] = val
def __lowerCAmelCase ( state_dict , config ):
    """Fold each vision layer's separate q/v attention biases into a single
    qkv bias entry of ``state_dict`` (the k bias is zero in the original
    checkpoint).

    NOTE(review): the original repeated one obfuscated placeholder for both
    parameters (a SyntaxError) and dropped the concatenated bias into a
    throwaway local; the parameter names are restored from the body's
    ``state_dict``/``config`` reads, and the qkv key write-back is presumed
    from the ``...attn.qkv.weight`` mapping in the rename table above (TODO
    confirm against the upstream conversion script).
    """
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict: [q | zeros (k) | v]
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def __lowerCAmelCase ( model_name ):
    """Derive the InstructBLIP config for ``model_name``.

    Returns ``(config, image_size)`` where image_size is 364 for COCO-tuned
    checkpoints and 224 otherwise. Raises ValueError for unknown names.

    NOTE(review): the original parameter carried an obfuscated placeholder
    while the body read ``model_name`` (and even passed the placeholder as
    ``image_size``); the parameter and the intermediate assignment targets
    are restored from the later reads.
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=3_2001 ).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=3_2001 ).to_dict()
    else:
        raise ValueError("Model name not supported" )
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict()
    config = InstructBlipConfig(vision_config=vision_config , text_config=text_config , qformer_config=qformer_config )
    return config, image_size
@torch.no_grad()
# NOTE(review): obfuscation residue — the three parameters repeat one name
# (a SyntaxError) while the body reads `model_name`, `pytorch_dump_folder_path`
# and `push_to_hub`; likewise most `lowercase__ = ...` statements lost their
# original assignment targets (qformer_tokenizer, tokenizer, hf_model,
# state_dict, processor, inputs, ...), so later references to those names are
# undefined as written. The helpers `load_demo_image`, `get_blipa_config`,
# `create_rename_keys`, `rename_key` and `read_in_q_v_bias` exist in this
# module only under obfuscated names. Left byte-identical apart from comments.
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ):
    """Convert an original LAVIS InstructBLIP checkpoint to the Hugging Face
    format, check logits/generation parity against the original model, then
    optionally save the converted model/processor and push them to the hub.
    """
    # Q-Former tokenizer is BERT's vocabulary plus the extra "[DEC]" token.
    lowercase__ = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" )
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
    if "t5" in model_name:
        lowercase__ = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" )
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        lowercase__ = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
    lowercase__ , lowercase__ = get_blipa_config(SCREAMING_SNAKE_CASE_ )
    lowercase__ = InstructBlipForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval()
    # Map HF model names onto the LAVIS (name, model_type) pairs.
    lowercase__ = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    lowercase__ , lowercase__ = model_name_to_original[model_name]
    # load original model
    print("Loading original model..." )
    # original and HF model are kept on different devices to fit in memory
    lowercase__ = "cuda:1" if torch.cuda.is_available() else "cpu"
    lowercase__ = "cuda:2" if torch.cuda.is_available() else "cpu"
    lowercase__ , lowercase__ , lowercase__ = load_model_and_preprocess(
        name=SCREAMING_SNAKE_CASE_ , model_type=SCREAMING_SNAKE_CASE_ , is_eval=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ )
    original_model.eval()
    print("Done!" )
    # update state dict keys
    lowercase__ = original_model.state_dict()
    lowercase__ = create_rename_keys(SCREAMING_SNAKE_CASE_ )
    for src, dest in rename_keys:
        rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ )
        if key.startswith("Qformer.bert" ):
            lowercase__ = key.replace("Qformer.bert" , "qformer" )
        if "attention.self" in key:
            lowercase__ = key.replace("self" , "attention" )
        if "llm_proj" in key:
            lowercase__ = key.replace("llm_proj" , "language_projection" )
        if "t5_proj" in key:
            lowercase__ = key.replace("t5_proj" , "language_projection" )
        if key.startswith("llm_model" ):
            lowercase__ = key.replace("llm_model" , "language_model" )
        if key.startswith("t5" ):
            lowercase__ = key.replace("t5" , "language" )
        lowercase__ = val
    # read in qv biases
    read_in_q_v_bias(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
    lowercase__ = load_demo_image()
    lowercase__ = "What is unusual about this image?"
    # create processor
    lowercase__ = BlipImageProcessor(
        size={"height": image_size, "width": image_size} , image_mean=SCREAMING_SNAKE_CASE_ , image_std=SCREAMING_SNAKE_CASE_ )
    lowercase__ = InstructBlipProcessor(
        image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ , )
    lowercase__ = processor(images=SCREAMING_SNAKE_CASE_ , text=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE_ )
    # make sure processor creates exact same pixel values
    lowercase__ = vis_processors["eval"](SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE_ )
    lowercase__ = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device ) , SCREAMING_SNAKE_CASE_ )
    original_model.to(SCREAMING_SNAKE_CASE_ )
    hf_model.to(SCREAMING_SNAKE_CASE_ )
    with torch.no_grad():
        # vicuna has no text_output head path; flan-T5 needs labels for a loss pass
        if "vicuna" in model_name:
            lowercase__ = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
            lowercase__ = hf_model(**SCREAMING_SNAKE_CASE_ ).logits
        else:
            lowercase__ = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
            lowercase__ = tokenizer("\n" , return_tensors="pt" ).input_ids.to(SCREAMING_SNAKE_CASE_ )
            lowercase__ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
            lowercase__ = hf_model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ).logits
    print("First values of original logits:" , original_logits[0, :3, :3] )
    print("First values of HF logits:" , logits[0, :3, :3] )
    # assert values
    assert original_logits.shape == logits.shape
    lowercase__ = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device ) , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ )
    print("Looks ok!" )
    print("Generating with original model..." )
    lowercase__ = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 )
    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model..." )
    lowercase__ = hf_model.generate(
        **SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        lowercase__ = 2
    print("Original generation:" , SCREAMING_SNAKE_CASE_ )
    lowercase__ = processor.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
    lowercase__ = [text.strip() for text in output_text]
    print("HF generation:" , SCREAMING_SNAKE_CASE_ )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
        hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
    if push_to_hub:
        processor.push_to_hub(f'''Salesforce/{model_name}''' )
        hf_model.push_to_hub(f'''Salesforce/{model_name}''' )
if __name__ == "__main__":
    # Command-line driver for the InstructBLIP checkpoint conversion.
    # NOTE(review): obfuscation residue — `lowercase_` swallowed the targets
    # `parser`, `choices` and `args`, which the later statements read, and
    # `convert_blipa_checkpoint` does not exist under that name in this module
    # (the conversion routine above is obfuscated to `__lowerCAmelCase`).
    # Undefined as written; flagged for restoration.
    lowercase_ = argparse.ArgumentParser()
    lowercase_ = [
        """instructblip-vicuna-7b""",
        """instructblip-vicuna-13b""",
        """instructblip-flan-t5-xl""",
        """instructblip-flan-t5-xxl""",
    ]
    parser.add_argument(
        """--model_name""",
        default="""instructblip-flan-t5-xl""",
        choices=choices,
        type=str,
        help="""Path to hf config.json of model to convert""",
    )
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model and processor to the hub after converting""",
    )
    lowercase_ = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 37 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowerCAmelCase ( ):
    """Entry point: parse TensorFlow benchmark arguments and run the benchmark.

    If parsing fails because of removed ``--no_*`` style flags, rewrite the
    error into an actionable message telling the user to use ``--no-*``.

    NOTE(review): every assignment target in the original was replaced by a
    throwaway local while later statements read ``parser``, ``benchmark``,
    ``e``, ``arg_error_msg``, ``full_error_msg``, ``begin_error_msg``,
    ``depreciated_args`` and ``wrong_args``; the names are restored here from
    those reads (matching the upstream benchmark runner script).
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e ).split(" " )[:-1] )
        full_error_msg = ""
        # NOTE: eval() of the parser's own error text mirrors the upstream
        # script; the input comes from argparse, not from untrusted users.
        depreciated_args = eval(str(e ).split(" " )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
            raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
    # NOTE(review): no function named `main` exists in this module; the
    # benchmark entry point defined above is bound to `__lowerCAmelCase`, so
    # call it under that name to avoid a NameError when run as a script.
    __lowerCAmelCase()
| 37 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    """Builds tiny RegNet configs and random pixel inputs for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in hidden_sizes; used by the hidden-state count check.
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        """Return a (config, pixel_values) pair with random image data."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Build a small RegNetConfig from the tester's hyperparameters."""
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        """Run the base model and verify the last hidden state shape."""
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w): RegNet reduces the spatial dims by a factor of 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        """Run the classification head and verify the logits shape."""
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common model-test suite for the Flax RegNet models."""

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    # RegNet is a pure conv net: no encoder/decoder split, no head masking, no attentions.
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Covered by the ConfigTester calls above; kept for API parity with other suites.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            # One hidden state per stage plus the stem output.
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end checks against the pretrained facebook/regnet-y-040 checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Image processor is only usable when the vision extras are installed.
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 10_00)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 50 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term that contains `n` digits.

    Project Euler problem 25. Indexing: F(1) = 1, F(2) = 1, F(3) = 2, ...
    The original code counted the digits of the *argument* instead of the
    current Fibonacci value and compared against an undefined name.
    """
    fib_prev, fib_curr = 1, 1  # F(1), F(2)
    index = 2
    while len(str(fib_curr)) < n:
        fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
        index += 1
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 543 | 0 |
"""simple docstring"""
# Public API of the download subpackage; the list was previously assigned to a
# throwaway name, so `from ... import *` and static tooling could not see it.
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 707 |
"""simple docstring"""
def compute_ap(graph):  # noqa: E741
    """Print every articulation point of an undirected graph.

    graph: adjacency list mapping vertex -> list of neighbours; vertices are
    assumed to be the integers 0..n-1 so they can index the bookkeeping lists.
    Uses a DFS low-link computation; the vertex index doubles as the discovery
    id, matching the original implementation.
    """
    n = len(graph)
    low = [0] * n          # low-link value per vertex
    visited = [False] * n
    is_art = [False] * n   # articulation-point flags, printed at the end

    def dfs(root, at, parent, out_edge_count):
        # Count tree edges leaving the DFS root; the root is an articulation
        # point iff it has more than one DFS child.
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = dfs(i, i, -1, 0)
            # Root rule overrides whatever the bridge/cycle checks decided.
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
| 395 | 0 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential - building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images
    # Load trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 598 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return a longest non-decreasing subsequence of `array`.

    >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    [10, 22, 33, 41, 60, 80]
    >>> longest_subsequence([1, 2, 3, 4])
    [1, 2, 3, 4]
    >>> longest_subsequence([])
    []
    """
    array_length = len(array)
    # If the array contains only one element, we return it
    # (it's the stop condition of recursion).
    if array_length <= 1:
        return array

    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            # Branch: drop the pivot and recurse on the elements >= array[i].
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    # Branch: keep the pivot and recurse on the remaining elements >= pivot.
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 598 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: module path -> public names it provides. The dict was
# previously bound to a throwaway name and then overwritten, so `_import_structure`
# referenced below never existed.
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exposed when torch is installed.
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 488 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output of the scheduler's `step` function.

    Args:
        prev_sample: sample for the next diffusion step (x_{t+1} in the inverse process).
        pred_original_sample: the model's prediction of the denoised sample x_0,
            useful for progress preview / guidance.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def lowerCamelCase__ ( lowercase , lowercase=0.999 , lowercase="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
SCREAMING_SNAKE_CASE : Optional[int] = []
for i in range(lowercase ):
SCREAMING_SNAKE_CASE : str = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE : List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase ) / alpha_bar_fn(lowercase ) , lowercase ) )
return torch.tensor(lowercase , dtype=torch.floataa )
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """Inverse (noising) scheduler of DDIM (https://arxiv.org/abs/2010.02502).

    Runs the DDIM update in reverse, mapping a clean sample towards noise; used
    e.g. for image inversion before editing. Configuration mirrors
    `DDIMScheduler`; `set_alpha_to_zero` controls the alpha used for the final
    (out-of-range) step.
    """

    # Solver order (number of model outputs consumed per step).
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        # Backwards compatibility: the old `set_alpha_to_one` kwarg maps onto
        # `set_alpha_to_zero`.
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Identity hook kept for scheduler-API compatibility; DDIM needs no input scaling."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the discrete timesteps used for the inversion (to be run before inference).

        Raises:
            ValueError: if more inference steps are requested than training timesteps.
        """
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ):
        """Run one inverted DDIM step: propagate `sample` from t to t+1.

        Returns a `DDIMSchedulerOutput` (or a `(prev_sample, pred_original_sample)`
        tuple when `return_dict=False`).
        """
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
| 488 | 1 |
'''simple docstring'''
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Return the vector from `end_point1` to `end_point2`."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Return the cross product ab x ac."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Return True if every component rounds to zero at `accuracy` decimals."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Return True if the three 3D points lie on a single straight line.

    The points are collinear iff the cross product of AB and AC is (numerically)
    the zero vector.
    """
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
| 400 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    """Task template mapping a dataset's text column to a summary column."""

    # `task` identifies the template; keep it in asdict output even at its default.
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Map the configured dataset column names onto the canonical template names.
        return {self.text_column: "text", self.summary_column: "summary"}
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    """Round-trip a SplitDict through its YAML-list representation."""
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    """`asdict` must keep the deprecated `dataset_name` field in its output."""
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 434 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    """Fast CPU checks of the ScoreSdeVe pipeline with a tiny random UNet."""

    @property
    def dummy_uncond_unet(self):
        # Deterministic tiny model so the expected slice below is reproducible.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    """Slow integration test against the pretrained NCSN++ church checkpoint."""

    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 434 | 1 |
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum obtainable from non-adjacent elements of `nums`.

    The empty selection (sum 0) is always allowed, so all-negative input
    yields 0.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([-1, -5, -3, -7])
    0
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # Either extend the best "excluding previous" sum with num, or skip num.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
a__ : int = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
a__ : Union[str, Any] = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
a__ : Optional[Any] = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    """Return 1 if the two answers are equal after normalization, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    """Percentage (0-100) of predictions that exactly match at least one of their references."""
    scores = [
        any(compute_exact(ref, pred) for ref in refs)
        for pred, refs in zip(predictions, references)
    ]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Compute the SARI keep / delete / add F-scores for one n-gram order.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (predicted) sentence.
        rgramslist: list of n-gram lists, one per reference.
        numref: number of references (source/candidate counts are scaled by it
            so they are comparable with the pooled reference counts).

    Returns:
        (keepscore, delscore_precision, addscore) tuple of floats.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def UpperCAmelCase__ (ssent , csent , rsents ):
    """Sentence-level SARI score.

    Averages the keep/delete/add sub-scores (from ``SARIngram``) over 1- to
    4-grams, then averages the three operations into one final score.

    Args:
        ssent: source sentence (space-tokenized string).
        csent: candidate/simplified sentence (space-tokenized string).
        rsents: list of reference sentences (space-tokenized strings).

    Returns:
        The final SARI score in [0, 1].
    """
    numref = len(rsents )
    # Unigrams of source and candidate; higher-order n-grams built below.
    s1grams = ssent.split(" " )
    c1grams = csent.split(" " )
    s2grams, s3grams, s4grams = [], [], []
    c2grams, c3grams, c4grams = [], [], []
    r1gramslist, r2gramslist, r3gramslist, r4gramslist = [], [], [], []
    for rsent in rsents:
        r1grams = rsent.split(" " )
        r2grams, r3grams, r4grams = [], [], []
        r1gramslist.append(r1grams )
        for i in range(0 , len(r1grams ) - 1 ):
            if i < len(r1grams ) - 1:
                r2grams.append(r1grams[i] + " " + r1grams[i + 1] )
            if i < len(r1grams ) - 2:
                r3grams.append(r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] )
            if i < len(r1grams ) - 3:
                r4grams.append(r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3] )
        r2gramslist.append(r2grams )
        r3gramslist.append(r3grams )
        r4gramslist.append(r4grams )
    # Source-side 2/3/4-grams.
    for i in range(0 , len(s1grams ) - 1 ):
        if i < len(s1grams ) - 1:
            s2grams.append(s1grams[i] + " " + s1grams[i + 1] )
        if i < len(s1grams ) - 2:
            s3grams.append(s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] )
        if i < len(s1grams ) - 3:
            s4grams.append(s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3] )
    # Candidate-side 2/3/4-grams.
    for i in range(0 , len(c1grams ) - 1 ):
        if i < len(c1grams ) - 1:
            c2grams.append(c1grams[i] + " " + c1grams[i + 1] )
        if i < len(c1grams ) - 2:
            c3grams.append(c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] )
        if i < len(c1grams ) - 3:
            c4grams.append(c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3] )
    (keep1score, del1score, add1score) = SARIngram(s1grams , c1grams , r1gramslist , numref )
    (keep2score, del2score, add2score) = SARIngram(s2grams , c2grams , r2gramslist , numref )
    (keep3score, del3score, add3score) = SARIngram(s3grams , c3grams , r3gramslist , numref )
    (keep4score, del4score, add4score) = SARIngram(s4grams , c4grams , r4gramslist , numref )
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score] ) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score] ) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def UpperCAmelCase__ (sentence , lowercase = True , tokenizer = "13a" , return_str = True ):
    """Normalize a sentence for SARI/SacreBLEU scoring.

    Args:
        sentence: input sentence.
        lowercase: lowercase the sentence first.
        tokenizer: "13a"/"intl" use sacrebleu tokenizers, "moses"/"penn" use
            sacremoses; anything else leaves the sentence untokenized.
        return_str: return a string; if False, return the whitespace-split tokens.
    """
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        # sacrebleu >= 2 moved the tokenizer registry.
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def UpperCAmelCase__ (sources , predictions , references ):
    """Corpus-level SARI (scaled to 0-100).

    Averages the sentence-level SARI of each (source, prediction, references)
    triple after normalizing every sentence.

    Raises:
        ValueError: if the three lists do not have equal length.
    """
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError("Sources length must match predictions and references lengths." )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 100 * sari_score
def UpperCAmelCase__ (predictions , references , smooth_method="exp" , smooth_value=None , force=False , lowercase=False , use_effective_order=False , ):
    """Corpus BLEU via sacrebleu.

    ``references`` is a list (one entry per prediction) of reference lists of
    equal length; it is transposed into sacrebleu's expected per-reference-set
    layout before scoring.

    Raises:
        ValueError: if the predictions do not all have the same number of references.
    """
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError("Sacrebleu requires the same number of references for each prediction" )
    # Transpose: sacrebleu wants one list per reference index, not per prediction.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    output = sacrebleu.corpus_bleu(
        predictions , transformed_references , smooth_method=smooth_method , smooth_value=smooth_value , force=force , lowercase=lowercase , use_effective_order=use_effective_order , )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCamelCase_ ( datasets.Metric):
    """Text-simplification metric bundle reporting SARI, SacreBLEU and exact match."""

    def _info(self):
        # Schema: one prediction string plus a sequence of reference strings
        # per example. `datasets.Metric` dispatches to `_info`/`_compute`.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        """Return {"sari", "sacrebleu", "exact"} scores for the given corpus."""
        result = {}
        result.update({"sari": compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({"exact": compute_em(predictions=predictions , references=references )} )
        return result
| 682 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class _A ( __magic_name__):
def __init__( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = data
def __iter__( self ):
"""simple docstring"""
for element in self.data:
yield element
def A_ ( even_batches=True ):
    """Build a two-process Accelerator; ``even_batches`` controls whether
    batches are padded to be identical across ranks."""
    accelerator = Accelerator(even_batches=even_batches )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def A_ ( accelerator , dataset_size , batch_size , iterable = False ):
    """Create and ``prepare`` a DataLoader over ``range(dataset_size)``.

    ``iterable=True`` wraps the data in the iterable-style dummy dataset
    (class ``_A`` above); otherwise a map-style TensorDataset is used.
    """
    if iterable:
        dataset = _A(torch.as_tensor(range(dataset_size ) ) )
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size ) ) )
    dl = DataLoader(dataset , batch_size=batch_size )
    dl = accelerator.prepare(dl )
    return dl
def A_ ( accelerator , dataset_size , batch_size , process_0_expected_batch_sizes , process_1_expected_batch_sizes , ):
    """Check the per-rank batch sizes produced by a prepared DataLoader."""
    dl = create_dataloader(accelerator=accelerator , dataset_size=dataset_size , batch_size=batch_size )
    batch_sizes = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def A_ ( ):
    """With the default even_batches=True, every rank sees identically sized batches."""
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def A_ ( ):
    """With even_batches disabled, trailing ranks get fewer / smaller batches."""
    accelerator = create_accelerator(even_batches=False )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def A_ ( ):
    """join_uneven_inputs lets ranks with uneven batch counts finish a DDP loop."""
    accelerator = create_accelerator(even_batches=False )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(dl ):
            output = ddp_model(batch[0].float() )
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx )
    accelerator.wait_for_everyone()
    # With 3 samples over 2 ranks, rank 0 gets two batches and rank 1 one.
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def A_ ( accelerator ):
    """join_uneven_inputs on a non-DDP model must emit a 'multi-GPU only' warning."""
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass
    assert issubclass(w[-1].category , UserWarning )
    assert "only supported for multi-GPU" in str(w[-1].message )
def A_ ( ):
    """even_batches can be overridden for the duration of join_uneven_inputs
    and is restored afterwards on every prepared dataloader."""
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    train_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    valid_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    # Overridden inside the context, back to the default once it exits.
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def A_ ( ):
    """Overriding even_batches must not crash when an iterable dataloader (which
    has no batch_sampler) was prepared alongside a map-style one."""
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    batch_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore' )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def A_ ( ):
    """Overriding even_batches while an iterable dataloader is prepared should
    warn that the override only applies to map-style datasets."""
    accelerator = create_accelerator()
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=False ):
            pass
    assert issubclass(w[-1].category , UserWarning )
    assert "only supported for map-style datasets" in str(w[-1].message )
def A_ ( ):
    """Entry point: exercise every even_batches / join_uneven_inputs behaviour."""
    accelerator = create_accelerator()
    accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
    test_default_ensures_even_batch_sizes()
    accelerator.print('Run tests with even_batches disabled' )
    test_can_disable_even_batches()
    accelerator.print('Test joining uneven inputs' )
    test_can_join_uneven_inputs()
    accelerator.print('Test overriding even_batches when joining uneven inputs' )
    test_join_can_override_even_batches()
    accelerator.print('Test overriding even_batches for mixed dataloader types' )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print('Test join with non DDP distributed raises warning' )
    # Temporarily pretend we are not in a DDP setup, then restore the real state.
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator )
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    # A_ is this script's main() entry point.
    A_()
| 353 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _A :
    """Config/inputs factory and behavioural checks for TF Pegasus model tests.

    NOTE(review): names below were restored from the attribute references the
    methods make (self.batch_size, self.config_cls, ...); confirm against the
    upstream transformers test file.
    """

    # Model configuration class and per-test config overrides.
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Build a small random (config, inputs_dict) pair whose inputs end in EOS."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """A decoder pass using cached past_key_values must match the no-cache pass."""
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3 )
def A_ ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """Build the standard kwargs dict for a TF Pegasus forward pass, creating
    default (attend-everywhere / all-heads-on) masks for any not supplied."""
    if attention_mask is None:
        # Attend to every non-pad token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        # Always attend to the first decoder token, then mask decoder padding.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _A ( __magic_name__ , __magic_name__ , unittest.TestCase):
    # NOTE(review): the two `__magic_name__` bases are an undefined placeholder —
    # this class cannot be created as written. The mixins imported above
    # (TFModelTesterMixin, PipelineTesterMixin) look like the intended bases;
    # confirm before relying on these tests.
    # NOTE(review): every class attribute below rebinds the same name
    # `SCREAMING_SNAKE_CASE`, so only the last assignment survives; the values
    # suggest distinct original attributes (model-class tuples, pipeline
    # mapping, boolean flags).
    SCREAMING_SNAKE_CASE : Tuple = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    SCREAMING_SNAKE_CASE : Optional[int] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    SCREAMING_SNAKE_CASE : List[Any] = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE : Union[str, Any] = True
    SCREAMING_SNAKE_CASE : Tuple = False
    SCREAMING_SNAKE_CASE : Optional[Any] = False
    def UpperCAmelCase ( self ):
        """setUp-style initializer.

        NOTE(review): both assignments bind throwaway locals instead of
        `self.model_tester` / `self.config_tester`, and reference the undefined
        names `TFPegasusModelTester` and `_SCREAMING_SNAKE_CASE` — broken as
        written; later methods read `self.model_tester` / `self.config_tester`.
        """
        SCREAMING_SNAKE_CASE_ : Any = TFPegasusModelTester(self )
        SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE )
    def UpperCAmelCase ( self ):
        """Run the shared PegasusConfig sanity checks."""
        self.config_tester.run_common_tests()
    def UpperCAmelCase ( self ):
        """Check decoder past-key-values behaviour on large inputs.

        NOTE(review): `*_SCREAMING_SNAKE_CASE` is undefined here; the tuple
        returned by prepare_config_and_inputs_for_common was presumably meant
        to be unpacked into the call.
        """
        SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_SCREAMING_SNAKE_CASE )
@require_sentencepiece
@require_tokenizers
@require_tf
class _A ( unittest.TestCase):
    """Slow integration test: batch summarization with google/pegasus-xsum."""

    src_text = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    expected_text = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = '''google/pegasus-xsum'''

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        """Generate summaries for src_text and compare against expected_text."""
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        """Tokenize src_text, run beam-search generation and decode the output."""
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , padding=True , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 353 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __a ( OnnxPipelineTesterMixin, unittest.TestCase ):
    """Fast CPU-provider checks for OnnxStableDiffusionUpscalePipeline across
    several schedulers; each test verifies the 4x upscaled shape and a pixel slice."""

    # TODO: is there an appropriate internal test set?
    hub_checkpoint = 'ssube/stable-diffusion-x4-upscaler-onnx'

    def get_dummy_inputs(self, seed=0):
        """Deterministic 1x3x128x128 dummy image plus generation kwargs."""
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __a ( unittest.TestCase ):
    """Nightly GPU integration tests for the ONNX stable-diffusion x4 upscaler.

    NOTE(review): both properties and both test methods share the obfuscated
    name `UpperCAmelCase__`, so only the last definition of each survives at
    class-creation time; locals are read under names (`options`, `init_image`,
    `pipe`, `prompt`, `generator`, `output`, `images`, `image_slice`,
    `expected_slice`) that are never assigned. Confirm against the upstream
    diffusers test file.
    """

    @property
    def UpperCAmelCase__ ( self : int ):
        """Execution-provider tuple limiting CUDA arena growth to ~15GB."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def UpperCAmelCase__ ( self : Dict ):
        """onnxruntime session options with memory-pattern optimization disabled."""
        __SCREAMING_SNAKE_CASE = ort.SessionOptions()
        __SCREAMING_SNAKE_CASE = False
        return options

    def UpperCAmelCase__ ( self : Dict ):
        """Upscale a 128x128 sketch with the default (PNDM) scheduler and check a slice."""
        __SCREAMING_SNAKE_CASE = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        __SCREAMING_SNAKE_CASE = init_image.resize((128, 128) )
        # using the PNDM scheduler by default
        __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""" ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        __SCREAMING_SNAKE_CASE = """A fantasy landscape, trending on artstation"""
        __SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pipe(
            prompt=lowerCamelCase ,image=lowerCamelCase ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=lowerCamelCase ,output_type="""np""" ,)
        __SCREAMING_SNAKE_CASE = output.images
        # Central patch fingerprint of the 4x-upscaled (512x512) result.
        __SCREAMING_SNAKE_CASE = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        __SCREAMING_SNAKE_CASE = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    def UpperCAmelCase__ ( self : List[Any] ):
        """Same upscale test but with an LMSDiscreteScheduler and 20 steps."""
        __SCREAMING_SNAKE_CASE = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        __SCREAMING_SNAKE_CASE = init_image.resize((128, 128) )
        __SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""" ,subfolder="""scheduler""" )
        __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""" ,scheduler=lowerCamelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        __SCREAMING_SNAKE_CASE = """A fantasy landscape, trending on artstation"""
        __SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pipe(
            prompt=lowerCamelCase ,image=lowerCamelCase ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=lowerCamelCase ,output_type="""np""" ,)
        __SCREAMING_SNAKE_CASE = output.images
        __SCREAMING_SNAKE_CASE = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        __SCREAMING_SNAKE_CASE = np.array(
            [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 109 |
import cva
import numpy as np
class _A :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if k in (0.04, 0.06):
SCREAMING_SNAKE_CASE_ : Any = k
SCREAMING_SNAKE_CASE_ : List[Any] = window_size
else:
raise ValueError('invalid k value' )
def __str__( self ):
"""simple docstring"""
return str(self.k )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = cva.imread(_SCREAMING_SNAKE_CASE , 0 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = img.shape
SCREAMING_SNAKE_CASE_ : list[list[int]] = []
SCREAMING_SNAKE_CASE_ : Tuple = img.copy()
SCREAMING_SNAKE_CASE_ : int = cva.cvtColor(_SCREAMING_SNAKE_CASE , cva.COLOR_GRAY2RGB )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = np.gradient(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = dx**2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dy**2
SCREAMING_SNAKE_CASE_ : Optional[int] = dx * dy
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0.04
SCREAMING_SNAKE_CASE_ : int = self.window_size // 2
for y in range(_SCREAMING_SNAKE_CASE , h - offset ):
for x in range(_SCREAMING_SNAKE_CASE , w - offset ):
SCREAMING_SNAKE_CASE_ : str = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ : List[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ : Optional[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ : List[Any] = (wxx * wyy) - (wxy**2)
SCREAMING_SNAKE_CASE_ : List[str] = wxx + wyy
SCREAMING_SNAKE_CASE_ : Union[str, Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
    # Detect corners in the input image and save the annotated copy.
    # (Previously the instance and the unpacked results were bound to
    # obfuscated names, leaving `edge_detect`/`color_img` undefined.)
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect('path_to_image')
    cva.imwrite('detect.png', color_img)
| 511 | 0 |
from ...configuration_utils import PretrainedConfig
class __A ( PretrainedConfig ):
    """Configuration for BertGeneration (BERT adapted for sequence generation).

    Fixes applied: the base class was an undefined name `a` (the file imports
    `PretrainedConfig`), every constructor parameter shared one duplicated
    name (a SyntaxError), and `super().__init__` was passed the class object
    instead of the token ids.
    """

    # Key used by AutoConfig dispatch (was assigned to an obfuscated name).
    model_type = """bert-generation"""

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        """Store the hyperparameters and forward the special token ids to the base config."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 711 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
# Emit INFO-level progress during checkpoint conversion; module-scoped logger.
logging.set_verbosity_info()
UpperCAmelCase__ : int =logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load a UniSpeechSat sequence-classification model and copy the S3PRL
    downstream head weights into it.

    Fixes applied: the original had three parameters sharing one name (a
    SyntaxError) and assigned every weight tensor to a throwaway local instead
    of the model; renamed to `convert_classification`, the name the conversion
    entry point actually calls.
    """
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    # NOTE(review): target attributes (projector/classifier) follow the
    # upstream s3prl conversion script — confirm against the model definition.
    model.projector.weight.data = downstream_dict["""projector.weight"""]
    model.projector.bias.data = downstream_dict["""projector.bias"""]
    model.classifier.weight.data = downstream_dict["""model.post_net.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.post_net.linear.bias"""]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load a UniSpeechSat audio-frame-classification (diarization) model and
    copy the S3PRL linear head weights into it.

    Fixes applied: duplicate parameter names (SyntaxError) and weight copies
    that were discarded into a local; renamed to the name its caller uses.
    """
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["""model.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.linear.bias"""]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load a UniSpeechSat x-vector (speaker-verification) model and copy the
    S3PRL downstream weights (connector, TDNN stack, utterance-level linears,
    AMSoftmax objective) into it.

    Fixes applied: duplicate parameter names (SyntaxError) and weight copies
    discarded into a local; renamed to the name its caller uses.
    """
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["""connector.weight"""]
    model.projector.bias.data = downstream_dict["""connector.bias"""]
    # One TDNN layer per configured kernel size.
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
    model.feature_extractor.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
    model.classifier.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
    model.classifier.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
    model.objective.weight.data = downstream_dict["""objective.W"""]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an S3PRL UniSpeechSat downstream checkpoint to HuggingFace format
    and save both the model and its feature extractor to `model_dump_path`.

    Fixes applied: duplicate parameter names (SyntaxError) and every
    intermediate bound to one throwaway local so `hf_model` was undefined;
    renamed to `convert_saprl_checkpoint`, the name invoked from __main__.
    """
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""" )
    downstream_dict = checkpoint["""Downstream"""]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    # NOTE(review): the boolean values were lost in obfuscation; True/False
    # follow the upstream conversion script — confirm.
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("""ForSequenceClassification""" ):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("""ForAudioFrameClassification""" ):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("""ForXVector""" ):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
    if hf_config.use_weighted_layer_sum:
        # Weighted layer-sum models also carry featurizer weights.
        hf_model.layer_weights.data = checkpoint["""Featurizer"""]["""weights"""]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    # CLI wiring for the S3PRL -> HuggingFace UniSpeechSat conversion.
    # NOTE(review): the parser is assigned to `UpperCAmelCase__` but read as
    # `parser`, and the parsed namespace as `args` — obfuscated names; confirm.
    UpperCAmelCase__ : Dict =argparse.ArgumentParser()
    parser.add_argument(
        '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
    )
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
    parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    UpperCAmelCase__ : List[Any] =parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 269 | 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Make the example-script directories importable so the run_* modules below
# resolve. (Previously the list was bound to an obfuscated name while
# `sys.path.extend` read the undefined `SRC_DIRS`.)
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        'text-classification',
        'language-modeling',
        'summarization',
        'token-classification',
        'question-answering',
    ]
]
sys.path.extend(SRC_DIRS)

if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax

logging.basicConfig(level=logging.DEBUG)
# Root logger, extended with a stdout handler further down the file.
logger = logging.getLogger()
def _lowercase ( ):
    """Parse the ``-f`` flag (injected by some notebook/test runners) and return its value."""
    flag_parser = argparse.ArgumentParser()
    flag_parser.add_argument("""-f""" )
    parsed = flag_parser.parse_args()
    return parsed.f
def get_results(output_dir, split="eval"):
    """Load ``{split}_results.json`` from *output_dir*.

    Raises ValueError when the file does not exist.

    Fixes applied: the original had two parameters sharing the name ``__A`` (a
    SyntaxError), never bound the joined ``path`` it later read, and tested
    ``os.path.exists`` on the directory instead of the results file; renamed to
    ``get_results``, the name every test method in this file calls.
    """
    path = os.path.join(output_dir, f"{split}_results.json" )
    if os.path.exists(path):
        with open(path, """r""" ) as f:
            return json.load(f)
    raise ValueError(f"can't find {path}" )
# Mirror log records to stdout so pytest captures them.
# (Previously the handler was bound to an obfuscated name while addHandler
# read the undefined `stream_handler`.)
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase__ ( TestCasePlus ):
    """End-to-end smoke tests that run each Flax example script on tiny fixtures.

    Fixes applied: the base class was an undefined obfuscated name (the file
    imports ``TestCasePlus``), all seven test methods shared one obfuscated
    name so only the last survived, and ``patch.object``/``get_results`` were
    called with the undefined name ``lowercase`` instead of ``sys``/``argv``
    list/``tmp_dir``.
    """

    def test_run_glue(self):
        """Tiny MRPC fine-tune with run_flax_glue; checks eval accuracy."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
        with patch.object(sys, """argv""", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["""eval_accuracy"""], 0.75)

    @slow
    def test_run_clm(self):
        """Tiny causal-LM training with run_clm_flax; checks perplexity bound."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(sys, """argv""", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["""eval_perplexity"""], 1_0_0)

    @slow
    def test_run_summarization(self):
        """Tiny XSum run with run_summarization_flax; checks ROUGE floor values."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
        with patch.object(sys, """argv""", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="""test""")
            self.assertGreaterEqual(result["""test_rouge1"""], 1_0)
            self.assertGreaterEqual(result["""test_rouge2"""], 2)
            self.assertGreaterEqual(result["""test_rougeL"""], 7)
            self.assertGreaterEqual(result["""test_rougeLsum"""], 7)

    @slow
    def test_run_mlm(self):
        """Tiny masked-LM training with run_mlm_flax; checks perplexity bound."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
        with patch.object(sys, """argv""", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["""eval_perplexity"""], 4_2)

    @slow
    def test_run_t5_mlm(self):
        """Tiny T5 span-corruption training; checks eval accuracy floor."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(sys, """argv""", testargs):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["""eval_accuracy"""], 0.42)

    @slow
    def test_run_ner(self):
        """Tiny CoNLL NER run; checks accuracy and F1 floors."""
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
        with patch.object(sys, """argv""", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["""eval_accuracy"""], 0.75)
            self.assertGreaterEqual(result["""eval_f1"""], 0.3)

    @slow
    def test_run_qa(self):
        """Tiny SQuAD v2 run with run_qa; checks F1 and exact-match floors."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
        with patch.object(sys, """argv""", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["""eval_f1"""], 3_0)
            self.assertGreaterEqual(result["""eval_exact"""], 3_0)
| 601 |
'''simple docstring'''
def solution():
    """Project Euler 40: product d1 * d10 * d100 * ... * d1000000 of the digits
    of Champernowne's constant 0.123456789101112...

    Returns 210. Fixes applied: the loop/join read the undefined name ``__A``
    instead of the accumulator, and the function is renamed ``solution`` — the
    name the ``__main__`` block calls.
    """
    digits = []
    i = 1
    # 1e6 concatenated integers give comfortably more than 1e6 digits.
    while len(digits) < 1E6:
        digits.append(str(i))
        i += 1
    constant = """""".join(digits)
    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[99] )
        * int(constant[999] )
        * int(constant[9_999] )
        * int(constant[99_999] )
        * int(constant[999_999] )
    )
if __name__ == "__main__":
    # Print the Project Euler 40 answer (210).
    print(solution())
| 601 | 1 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download `num_class_images` LAION images matching `class_prompt` into
    ``{class_data_dir}/images`` and record caption/url/path manifests.

    Fixes applied: the original had three parameters sharing one name (a
    SyntaxError) and bound every local to a throwaway name while later lines
    read ``client``, ``num_images``, ``count``, ``total``, ``pbar``, ``img``
    and ``images``; renamed to ``retrieve``, the name invoked from __main__.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    retriever = ClipClient(
        url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 )
    # NOTE(review): exist_ok value was lost in obfuscation; True matches the
    # early-return below — confirm.
    os.makedirs(f'''{class_data_dir}/images''' , exist_ok=True )
    # Already have enough images — nothing to do.
    if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
        return
    # Grow the query size until the index returns enough candidates (capped at 1e4).
    while True:
        class_images = retriever.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            retriever = ClipClient(
                url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc='''downloading real regularization images''' , total=num_class_images )
    with open(f'''{class_data_dir}/caption.txt''' , '''w''' ) as fa, open(f'''{class_data_dir}/urls.txt''' , '''w''' ) as fa_urls, open(
        f'''{class_data_dir}/images.txt''' , '''w''' ) as fa_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['''url'''] )
                if img.status_code == 200:
                    # Validate that the payload decodes as an image before saving.
                    _ = Image.open(BytesIO(img.content ) )
                    with open(f'''{class_data_dir}/images/{total}.jpg''' , '''wb''' ) as f:
                        f.write(img.content )
                    fa.write(images['''caption'''] + '''\n''' )
                    fa_urls.write(images['''url'''] + '''\n''' )
                    fa_paths.write(f'''{class_data_dir}/images/{total}.jpg''' + '''\n''' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                # Best-effort download: skip broken urls/payloads.
                continue
    return
def parse_args():
    """Build and parse the CLI arguments for the regularization-image retriever.

    Fixes applied: the parser was bound to a throwaway local while
    ``parser.add_argument`` read the undefined name ``parser``; lost
    argument values (add_help/required/type) restored; renamed ``parse_args``,
    the name invoked from __main__.
    """
    parser = argparse.ArgumentParser('''''' , add_help=False )
    parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=True , type=str )
    parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=True , type=str )
    parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    # Parse CLI args and download the class regularization image set.
    # NOTE(review): the parsed namespace is assigned to `A__` but read as
    # `args` — obfuscated name; confirm.
    A__ : Optional[Any] = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 700 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ : List[Any] = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    """Port one timm LeViT checkpoint into the HF
    LevitForImageClassificationWithTeacher format, verify the logits match,
    and optionally save model + image processor under ``save_directory/name``.

    Fixes applied: the original had five parameters sharing one name (a
    SyntaxError) and bound every intermediate to a throwaway local while later
    lines read ``from_model``, ``our_model``, ``weights`` etc.; renamed to the
    name its sibling ``convert_weights_and_push`` calls.
    """
    print(f'''Converting {name}...''' )
    with torch.no_grad():
        # NOTE(review): pretrained flag lost in obfuscation; True matches the
        # logits-equality assertion below — confirm.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('''levit_128s''' , pretrained=True )
            else:
                from_model = timm.create_model('''levit_128''' , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model('''levit_192''' , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model('''levit_256''' , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model('''levit_384''' , pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys) , len(new_keys) )
        # The two state dicts line up positionally; map key i -> key i.
        for i in range(len(og_keys) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        sample = torch.randn((2, 3, 224, 224) )
        timm_logits = from_model(sample)
        hf_logits = our_model(sample).logits
        assert torch.allclose(timm_logits , hf_logits ), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(f'''Pushed {checkpoint_name}''' )
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    """Convert one named LeViT variant (or all of them when *model_name* is
    None) and return ``(config, expected_shape)``.

    Fixes applied: the original had duplicate parameter names (SyntaxError),
    bound everything to one throwaway local while reading ``idalabel``,
    ``ImageNetPreTrainedConfig`` and the two lookup dicts, used the loop
    variable ``k`` as an undefined name in the dict comprehension, and left
    ``config`` unbound on the single-model path before returning it.
    """
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # Partial LevitConfig pre-filled with the ImageNet label mapping.
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_hidden_sizes = {
        '''levit-128S''': 128,
        '''levit-128''': 128,
        '''levit-192''': 192,
        '''levit-256''': 256,
        '''levit-384''': 384,
    }
    names_to_config = {
        '''levit-128S''': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        '''levit-128''': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        '''levit-192''': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        '''levit-256''': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        '''levit-384''': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }
    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    # Command-line interface for the timm LeViT -> HuggingFace conversion.
    # NOTE(review): the parser is assigned to `A__` but read as `parser`, and
    # the parsed namespace as `args` — obfuscated names; confirm.
    A__ : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )
    A__ : Optional[Any] = parser.parse_args()
    A__ : Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 272 | 0 |
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# (The path was previously bound to an obfuscated name while sys.path.insert
# read the undefined `git_repo_path`.)
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    """Register the custom markers used across the test-suite so pytest does
    not warn about unknown markers.

    Fixes applied: renamed from an obfuscated name to ``pytest_configure`` —
    pytest only invokes hooks with the exact hook name.
    """
    config.addinivalue_line(
        'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
    config.addinivalue_line(
        'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
    config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
    config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
    config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
    config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption(parser):
    """Delegate extra CLI options (e.g. --make-reports) to the shared helper.

    Renamed from an obfuscated name to ``pytest_addoption`` so pytest actually
    invokes the hook.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """Write the extended test reports when --make-reports was passed.

    Fixes applied: renamed from an obfuscated name to
    ``pytest_terminal_summary`` (pytest hook naming), and the helper is now
    called with the reporter and report id instead of undefined names.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session, exitstatus):
    """Treat pytest exit code 5 ("no tests collected") as success.

    Fixes applied: renamed to the exact pytest hook name, and the result is
    written to ``session.exitstatus`` instead of a discarded local.
    """
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
# (Both names below were previously bound to the obfuscated `__A`, leaving
# `IGNORE_RESULT` and `OutputChecker` — read by CustomOutputChecker — unbound.)
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

# Capture the stock checker so the subclass can delegate to it even after
# doctest.OutputChecker is monkeypatched further down.
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    """Doctest OutputChecker that honours the custom IGNORE_RESULT option flag.

    Fixes applied: the base class was an undefined obfuscated name, the method
    had three parameters sharing one name (a SyntaxError), and the class is
    renamed ``CustomOutputChecker`` — the name installed into
    ``doctest.OutputChecker`` below.
    """

    def check_output(self, want, got, optionflags):
        # When IGNORE_RESULT is set for the example, skip output comparison.
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
# Install the customised doctest machinery process-wide. (These three were
# previously assigned to the obfuscated `__A`, so none of the patches took
# effect.) NOTE(review): targets follow the upstream conftest — confirm.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 469 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback ( TrainerCallback ):
    """A TrainerCallback that records, in order, the name of every event it
    receives — used to assert the Trainer fires callbacks in the right order.

    Fixes applied: the base class was an undefined obfuscated name (the file
    imports ``TrainerCallback``), all hooks shared one obfuscated method name
    with duplicated parameters (a SyntaxError), and the events list was never
    stored on the instance. Method names are grounded in the event strings
    each body appends; renamed to ``MyTestTrainerCallback``, the name the test
    class below uses.
    """

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append('on_init_end' )

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append('on_train_begin' )

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append('on_train_end' )

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append('on_epoch_begin' )

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append('on_epoch_end' )

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append('on_step_begin' )

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append('on_step_end' )

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append('on_evaluate' )

    def on_predict(self, args, state, control, **kwargs):
        self.events.append('on_predict' )

    def on_save(self, args, state, control, **kwargs):
        self.events.append('on_save' )

    def on_log(self, args, state, control, **kwargs):
        self.events.append('on_log' )

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append('on_prediction_step' )
@require_torch
class a__( unittest.TestCase ):
def setUp(self):
    """Create a scratch output directory for each test (removed in tearDown).

    Renamed from an obfuscated duplicate method name to the unittest hook
    ``setUp``; the path is now stored on the instance (later methods read
    ``self.output_dir``).
    """
    self.output_dir = tempfile.mkdtemp()
def tearDown(self):
    """Delete the scratch directory created in setUp.

    Renamed from an obfuscated duplicate method name to the unittest hook
    ``tearDown``.
    """
    shutil.rmtree(self.output_dir )
def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
    """Build a tiny regression Trainer writing to ``self.output_dir``.

    Fixes applied: the original signature repeated one obfuscated parameter
    name six times (a SyntaxError) and every object was bound to a throwaway
    local; renamed to ``get_trainer``, the name the test methods call.
    """
    # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
    # its set to False since the tests later on depend on its value.
    train_dataset = RegressionDataset(length=train_len )
    eval_dataset = RegressionDataset(length=eval_len )
    config = RegressionModelConfig(a=a , b=b )
    model = RegressionPreTrainedModel(config )
    args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
    return Trainer(
        model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
def check_callbacks_equality(self, cbs1, cbs2):
    """Assert two callback collections match, order-insensitively, treating a
    callback class and an instance of that class as equal.

    Fixes applied: duplicate obfuscated parameter names (SyntaxError) and
    every ``isinstance`` check was called with undefined names; restored the
    class-vs-instance comparisons (``isinstance(cb, type)``) the branch
    structure implies.
    """
    self.assertEqual(len(cbs1 ) , len(cbs2 ) )
    # Order doesn't matter
    cbs1 = sorted(cbs1 , key=lambda cb: cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
    cbs2 = sorted(cbs2 , key=lambda cb: cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
    for cba, cbb in zip(cbs1 , cbs2 ):
        if isinstance(cba , type ) and isinstance(cbb , type ):
            self.assertEqual(cba , cbb )
        elif isinstance(cba , type ) and not isinstance(cbb , type ):
            self.assertEqual(cba , cbb.__class__ )
        elif not isinstance(cba , type ) and isinstance(cbb , type ):
            self.assertEqual(cba.__class__ , cbb )
        else:
            self.assertEqual(cba , cbb )
def _lowercase ( self , _UpperCAmelCase ) -> Any:
snake_case__ =['on_init_end', 'on_train_begin']
snake_case__ =0
snake_case__ =len(trainer.get_eval_dataloader() )
snake_case__ =['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(_UpperCAmelCase ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _lowercase ( self ) -> Optional[int]:
snake_case__ =self.get_trainer()
snake_case__ =DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
# Callbacks passed at init are added to the default callbacks
snake_case__ =self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(_UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
snake_case__ =self.get_trainer(disable_tqdm=_UpperCAmelCase )
snake_case__ =DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case__ =self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(_UpperCAmelCase )
expected_callbacks.remove(_UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
snake_case__ =self.get_trainer()
snake_case__ =trainer.pop_callback(_UpperCAmelCase )
self.assertEqual(cb.__class__ , _UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
trainer.add_callback(_UpperCAmelCase )
expected_callbacks.insert(0 , _UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
# We can also add, pop, or remove by instance
snake_case__ =self.get_trainer()
snake_case__ =trainer.callback_handler.callbacks[0]
trainer.remove_callback(_UpperCAmelCase )
expected_callbacks.remove(_UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
snake_case__ =self.get_trainer()
snake_case__ =trainer.callback_handler.callbacks[0]
snake_case__ =trainer.pop_callback(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
trainer.add_callback(_UpperCAmelCase )
expected_callbacks.insert(0 , _UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
def _lowercase ( self ) -> Dict:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=_UpperCAmelCase )
snake_case__ =self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
# Independent log/save/eval
snake_case__ =self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
snake_case__ =self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
snake_case__ =self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
snake_case__ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
snake_case__ =self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
snake_case__ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
# A bit of everything
snake_case__ =self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
snake_case__ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
snake_case__ =self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(_UpperCAmelCase ) in warn_mock.call_args[0][0]
| 538 | 0 |
from math import factorial
class Dual:
    """A dual number: a real part plus a list of dual (epsilon) coefficients.

    ``Dual(real, n)`` with an integer ``n`` creates ``real + 1*E1 + ... + 1*En``;
    passing a list uses it directly as the coefficients of E1..Ek.  Arithmetic
    on duals drives the forward-mode automatic differentiation performed by
    ``differentiate`` below.  (Class restored to the name ``Dual`` that
    ``differentiate`` actually instantiates.)
    """

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            # Integer rank -> all coefficients set to 1, so Dual(x, 1) == x + E1.
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        """Return a copy with trailing zero coefficients stripped."""
        cur = self.duals.copy()
        # Guard against an all-zero coefficient list (would IndexError otherwise).
        while cur and cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter coefficient list with zeros: a missing E^k term is 0.
        if len(s_dual) > len(o_dual):
            o_dual.extend([0] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([0] * (len(o_dual) - len(s_dual)))
        new_duals = [s + o for s, o in zip(s_dual, o_dual)]
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real * other, [d * other for d in self.duals])
        # Polynomial-style convolution of the two coefficient lists.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real / other, [d / other for d in self.duals])
        raise ValueError  # division by another dual number is not supported

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real // other, [d // other for d in self.duals])
        raise ValueError  # floor division by another dual number is not supported

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """Return the `order`-th derivative of `func` at `position` via dual numbers.

    `order=0` simply evaluates the function.  Raises ValueError when `func` is
    not callable, `position` is not numeric, or `order` is not an int.
    """
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    # Seed with x + E1; the k-th dual coefficient of the result holds f^(k)(x)/k!.
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
    import doctest

    doctest.testmod()


def f(y):
    """Example function: f(y) = y**2 * y**4 = y**6."""
    return y**2 * y**4


# Second derivative of y**6 at y = 9 (i.e. 30 * 9**4).
print(differentiate(f, 9, 2))
| 705 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


# Maps submodule name -> public names it exposes; consumed by _LazyModule below.
# (Restored: the structure was assigned to a placeholder name and each optional
# branch overwrote it instead of adding keys, leaving `_import_structure`
# undefined at the _LazyModule call.)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public names it exposes; consumed by _LazyModule below.
# (Restored: `_import_structure` was referenced at the bottom but the dict was
# assigned to a placeholder name and overwritten by the torch branch.)
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
# NOTE(review): several names below are imported twice (`smartaa_timesteps`,
# `superaa_timesteps`) — the distinct schedule names appear to have been
# collapsed by an automated rename; confirm against the `.timesteps` module.
from .timesteps import (
    fastaa_timesteps,
    smartaa_timesteps,
    smartaa_timesteps,
    smartaaa_timesteps,
    smartaaa_timesteps,
    superaa_timesteps,
    superaa_timesteps,
    superaaa_timesteps,
)
@dataclass
class __lowerCAmelCase(BaseOutput):
    """Output of the IF pipelines.

    The original source declared the same field name three times (leaving a
    single usable attribute) and inherited from an undefined name; fields and
    the `BaseOutput` base are restored from the upstream `IFPipelineOutput`.
    """

    # Generated images.
    images: Union[List[PIL.Image.Image], np.ndarray]
    # Per-image flag: content was flagged as NSFW (None when safety checking is disabled).
    nsfw_detected: Optional[List[bool]]
    # Per-image flag: a watermark was detected (None when detection is disabled).
    watermark_detected: Optional[List[bool]]
# Import the concrete IF pipelines only when both `transformers` and `torch`
# are installed; otherwise fall back to the dummy placeholder objects so the
# names still exist and raise a helpful error when used.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_imgaimg import IFImgaImgPipeline
    from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
"""simple docstring"""
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
# Checkpoints this script knows how to convert.
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

# (source key, destination key) pairs for the MNLI classification head.
mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys (in place) that have no HF equivalent."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        # Pop with a default so missing keys are not an error.
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` (in place); raises KeyError if absent."""
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Load a local checkpoint file and graft its weights onto the bart.large.cnn hub model.

    Checkpoint should be loadable on CPU (no GPU assumed).
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight tensor (tied LM head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share (not copy) the embedding weight so the projection stays tied.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Convert a fairseq BART checkpoint to the HF format and save it.

    checkpoint_path: hub name (e.g. "bart.large") or a local model.pt path.
    pytorch_dump_folder_path: output directory for `save_pretrained`.
    hf_checkpoint_name: HF config/tokenizer to use; derived from
        `checkpoint_path` when None.
    Raises ValueError when tokenizer outputs or model outputs disagree.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        # Tie the shared embedding to the decoder embedding.
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 707 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Compute per-process waiting times under Shortest Remaining Time First.

    Simulates one time unit per loop iteration, always running the arrived
    process with the least remaining burst time (preemptive SJF).
    """
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999  # sentinel: "no process selected yet"
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            # No process has arrived yet: idle for one time unit.
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time = turnaround - burst
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time of each process = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the mean waiting time and mean turnaround time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        # Two space-separated ints: arrival time then burst time.
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    # `fcfs` restored: the DataFrame was assigned to a placeholder name but
    # read back below as `fcfs`.
    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )
    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
| 378 | 0 |
'''simple docstring'''
# Pinned dependency table (package name -> pip requirement spec).
# Fix: the original annotated this assignment with `Optional[Any]`, a name not
# imported here — module-level annotations are evaluated, so import raised
# NameError.  The annotation is dropped; the table itself is unchanged.
__a: Optional[Any] = {
    """Pillow""": """Pillow<10.0.0""",
    """accelerate""": """accelerate>=0.20.3""",
    """av""": """av==9.2.0""",
    """beautifulsoup4""": """beautifulsoup4""",
    """black""": """black~=23.1""",
    """codecarbon""": """codecarbon==1.2.0""",
    """cookiecutter""": """cookiecutter==1.7.3""",
    """dataclasses""": """dataclasses""",
    """datasets""": """datasets!=2.5.0""",
    """decord""": """decord==0.6.0""",
    """deepspeed""": """deepspeed>=0.9.3""",
    """diffusers""": """diffusers""",
    """dill""": """dill<0.3.5""",
    """evaluate""": """evaluate>=0.2.0""",
    """fairscale""": """fairscale>0.3""",
    """faiss-cpu""": """faiss-cpu""",
    """fastapi""": """fastapi""",
    """filelock""": """filelock""",
    """flax""": """flax>=0.4.1,<=0.7.0""",
    """ftfy""": """ftfy""",
    """fugashi""": """fugashi>=1.0""",
    """GitPython""": """GitPython<3.1.19""",
    """hf-doc-builder""": """hf-doc-builder>=0.3.0""",
    """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
    """importlib_metadata""": """importlib_metadata""",
    """ipadic""": """ipadic>=1.0.0,<2.0""",
    """isort""": """isort>=5.5.4""",
    """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
    """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
    """jieba""": """jieba""",
    """kenlm""": """kenlm""",
    """keras-nlp""": """keras-nlp>=0.3.1""",
    """librosa""": """librosa""",
    """nltk""": """nltk""",
    """natten""": """natten>=0.14.6""",
    """numpy""": """numpy>=1.17""",
    """onnxconverter-common""": """onnxconverter-common""",
    """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
    """onnxruntime""": """onnxruntime>=1.4.0""",
    """opencv-python""": """opencv-python""",
    """optuna""": """optuna""",
    """optax""": """optax>=0.0.8,<=0.1.4""",
    """packaging""": """packaging>=20.0""",
    """parameterized""": """parameterized""",
    """phonemizer""": """phonemizer""",
    """protobuf""": """protobuf""",
    """psutil""": """psutil""",
    """pyyaml""": """pyyaml>=5.1""",
    """pydantic""": """pydantic<2""",
    """pytest""": """pytest>=7.2.0""",
    """pytest-timeout""": """pytest-timeout""",
    """pytest-xdist""": """pytest-xdist""",
    """python""": """python>=3.8.0""",
    """ray[tune]""": """ray[tune]""",
    """regex""": """regex!=2019.12.17""",
    """requests""": """requests""",
    """rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
    """rjieba""": """rjieba""",
    """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
    """ruff""": """ruff>=0.0.241,<=0.0.259""",
    """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
    """sacremoses""": """sacremoses""",
    """safetensors""": """safetensors>=0.3.1""",
    """sagemaker""": """sagemaker>=2.31.0""",
    """scikit-learn""": """scikit-learn""",
    """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
    """sigopt""": """sigopt""",
    """starlette""": """starlette""",
    """sudachipy""": """sudachipy>=0.6.6""",
    """sudachidict_core""": """sudachidict_core>=20220729""",
    """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
    """tensorflow""": """tensorflow>=2.6,<2.14""",
    """tensorflow-text""": """tensorflow-text<2.14""",
    """tf2onnx""": """tf2onnx""",
    """timeout-decorator""": """timeout-decorator""",
    """timm""": """timm""",
    """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
    """torch""": """torch>=1.9,!=1.12.0""",
    """torchaudio""": """torchaudio""",
    """torchvision""": """torchvision""",
    """pyctcdecode""": """pyctcdecode>=0.4.0""",
    """tqdm""": """tqdm>=4.27""",
    """unidic""": """unidic>=1.0.2""",
    """unidic_lite""": """unidic_lite>=1.0.7""",
    """urllib3""": """urllib3<2.0.0""",
    """uvicorn""": """uvicorn""",
}
'''simple docstring'''
# First cell injected into generated doc notebooks (Korean docs): installs
# the `transformers` and `datasets` packages.  `INSTALL_CONTENT` is restored —
# it was referenced by the cell list below but never defined.
INSTALL_CONTENT = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
# Template placeholders the code formatter must leave untouched.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
'''simple docstring'''
from ...processing_utils import ProcessorMixin


class lowerCAmelCase__(ProcessorMixin):
    """Bundle a TVLT image processor and a TVLT feature extractor into one processor.

    Restored from the upstream `TvltProcessor`: the original declaration had
    six identically-named `__call__` parameters (a SyntaxError), inherited from
    an undefined base, and lost the ProcessorMixin class attributes.
    """

    # Attribute names ProcessorMixin wires up for save/load.
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """Forward `images`/`images_mixed` to the image processor and `audio` to the
        feature extractor, and merge the resulting dicts.

        Raises ValueError when neither `images` nor `audio` is given.
        """
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        """Union of both sub-processors' input names, order-preserving and de-duplicated."""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# `logger` is the name the conversion code below actually uses; keep the old
# placeholder as an alias for backward compatibility.
logger = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = logger
def rename_keys(state_dict):
    """Translate original GLPN checkpoint keys into HF GLPN model keys.

    Returns a new OrderedDict; the input mapping is not modified.  Each rule
    below rewrites one naming convention of the original checkpoint.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx) - 1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx) - 1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    """Split each fused key/value ("kv") projection into separate key and value tensors (in place).

    The original GLPN implementation stores keys and values as one stacked
    matrix; the first `hidden_sizes[i]` rows are the key projection, the rest
    the value projection.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Download the standard COCO cats test image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read straight from the response's raw file object.
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Convert an original GLPN checkpoint (.pth) into a HuggingFace GLPNForDepthEstimation model.

    Args:
        checkpoint_path: path to the original PyTorch checkpoint file.
        pytorch_dump_folder_path: output folder (also used as the hub repo path when pushing).
        push_to_hub: whether to upload the converted model and image processor to the hub.
        model_name: hub model name; when given, a "nyu"/"kitti" variant triggers
            verification of the converted model against reference depth slices.

    Raises:
        ValueError: if ``model_name`` is given but is neither an "nyu" nor a "kitti" variant.
    """
    # GLPN architecture used by both released checkpoints
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info('Converting model...')

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(config, state_dict)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output against reference values for the known checkpoints
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f'Unknown model name: {model_name}')

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print('Looks ok!')

    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...')

        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add model',
            use_temp_dir=True,  # push from a temporary clone, per the upstream conversion script
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add image processor',
            use_temp_dir=True,
        )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 340 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __UpperCAmelCase ( unittest.TestCase ):
    '''Fast sanity checks for the ScoreSdeVe (variance-exploding score-based SDE) pipeline.'''

    @property
    def __snake_case ( self : List[Any]) -> Union[str, Any]:
        # Build a tiny, seeded UNet2D so the test is fast and deterministic.
        # NOTE(review): the constructed model is bound to `A_` but `model` is returned,
        # and this property shares its (mangled) name with the test method below —
        # both look like automated-renaming artifacts; confirm against the upstream test.
        torch.manual_seed(0)
        A_ = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    def __snake_case ( self : str) -> Optional[int]:
        # Smoke test: run the pipeline twice with identical seeds and compare the
        # dict-style (.images) and tuple-style (return_dict disabled) outputs
        # against a fixed reference slice.
        A_ = self.dummy_uncond_unet
        A_ = ScoreSdeVeScheduler()
        A_ = ScoreSdeVePipeline(unet=_lowercase , scheduler=_lowercase)
        # NOTE(review): `sde_ve`, `_lowercase`, `image`, `image_slice` and friends are
        # read below without being bound under those names in this method — likely
        # renaming artifacts; verify against the original diffusers test.
        sde_ve.to(_lowercase)
        sde_ve.set_progress_bar_config(disable=_lowercase)
        A_ = torch.manual_seed(0)
        A_ = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=_lowercase).images
        A_ = torch.manual_seed(0)
        A_ = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=_lowercase , return_dict=_lowercase)[
            0
        ]
        A_ = image[0, -3:, -3:, -1]
        A_ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        A_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        # loose tolerance: float noise across devices/backends
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
    '''Slow integration test: full ScoreSdeVe sampling with pretrained NCSN++ weights.'''

    def __snake_case ( self : List[Any]) -> Optional[int]:
        # Loads google/ncsnpp-church-256, runs 10 sampling steps, and checks a
        # fixed output slice against reference values.
        A_ = 'google/ncsnpp-church-256'
        A_ = UNetaDModel.from_pretrained(_lowercase)
        A_ = ScoreSdeVeScheduler.from_pretrained(_lowercase)
        A_ = ScoreSdeVePipeline(unet=_lowercase , scheduler=_lowercase)
        # NOTE(review): `sde_ve`, `_lowercase`, `image` and `image_slice` are used
        # without being bound under those names here — likely automated-renaming
        # artifacts; verify against the original diffusers test.
        sde_ve.to(_lowercase)
        sde_ve.set_progress_bar_config(disable=_lowercase)
        A_ = torch.manual_seed(0)
        A_ = sde_ve(num_inference_steps=10 , output_type='numpy' , generator=_lowercase).images
        A_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        A_ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 366 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict):
    """Round-trip a SplitDict through its YAML-list form and check equality.

    Fixes: the function was not collected by pytest (name did not start with
    ``test_``), its parameter name did not match the ``parametrize`` argname, the
    length assertion compared an object with itself, and the normalizing attribute
    assignments were discarded into a throwaway local.
    """
    split_dict_yaml_list = split_dict._to_yaml_list()
    # one YAML entry per split
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    """``asdict`` must keep the deprecated ``dataset_name`` field for backward compatibility.

    Fixes: the ``parametrize`` list referenced an undefined module-level name
    (now ``dataset_name=None``), the function was not pytest-collectible, and its
    parameter name did not match the ``parametrize`` argname.
    """
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 366 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: maps each submodule to the public names it provides, so the
# heavy optional backends (vision / torch / TF) are only imported when first accessed.
# Fixes: `_import_structure` was referenced by the _LazyModule call but never defined;
# each backend branch overwrote the whole binding instead of adding a submodule key;
# and the lazy module was bound to a throwaway name instead of replacing this module
# in sys.modules.
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # vision backend present: expose the image processor
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch backend present: expose the PyTorch models
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow backend present: expose the TF models
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    # static type checkers and IDEs see the real imports
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    # replace this module with a lazy proxy that imports submodules on attribute access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __magic_name__ ( unittest.TestCase ):
    '''Fast sanity checks for the ScoreSdeVe (variance-exploding score-based SDE) pipeline.'''

    @property
    def _lowerCAmelCase ( self ):
        """Return a tiny, seeded UNet2D backbone so the test is fast and deterministic."""
        # NOTE(review): the constructed model is bound to `lowerCamelCase` but `model`
        # is returned, and this property shares its (mangled) name with the test method
        # below — both look like automated-renaming artifacts; confirm upstream.
        torch.manual_seed(0 )
        lowerCamelCase = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model

    def _lowerCAmelCase ( self ):
        """Smoke test: dict-style and tuple-style pipeline outputs must match a fixed slice."""
        lowerCamelCase = self.dummy_uncond_unet
        lowerCamelCase = ScoreSdeVeScheduler()
        lowerCamelCase = ScoreSdeVePipeline(unet=_a , scheduler=_a )
        # NOTE(review): `sde_ve`, `_a`, `image`, `image_slice` etc. are read without
        # being bound under those names in this method — likely renaming artifacts;
        # verify against the original diffusers test.
        sde_ve.to(_a )
        sde_ve.set_progress_bar_config(disable=_a )
        lowerCamelCase = torch.manual_seed(0 )
        lowerCamelCase = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_a ).images
        lowerCamelCase = torch.manual_seed(0 )
        lowerCamelCase = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_a , return_dict=_a )[
            0
        ]
        lowerCamelCase = image[0, -3:, -3:, -1]
        lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        # loose tolerance: float noise across devices/backends
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __magic_name__ ( unittest.TestCase ):
    '''Slow integration test: full ScoreSdeVe sampling with pretrained NCSN++ weights.'''

    def _lowerCAmelCase ( self ):
        """Run 10 sampling steps of google/ncsnpp-church-256 and check a fixed output slice."""
        lowerCamelCase = """google/ncsnpp-church-256"""
        lowerCamelCase = UNetaDModel.from_pretrained(_a )
        lowerCamelCase = ScoreSdeVeScheduler.from_pretrained(_a )
        lowerCamelCase = ScoreSdeVePipeline(unet=_a , scheduler=_a )
        # NOTE(review): `sde_ve`, `_a`, `image` and `image_slice` are used without
        # being bound under those names here — likely automated-renaming artifacts;
        # verify against the original diffusers test.
        sde_ve.to(_a )
        sde_ve.set_progress_bar_config(disable=_a )
        lowerCamelCase = torch.manual_seed(0 )
        lowerCamelCase = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=_a ).images
        lowerCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        lowerCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 533 | 0 |
import math
def _lowercase( __a : int ):
a__ =0
a__ =0
while num > 0:
a__ =num % 8
a__ =octal + (remainder * math.floor(math.pow(10 , __a ) ))
counter += 1
a__ =math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return f"""0o{int(__a )}"""
def main():
    """Print octal representations of a few sample decimal values.

    Fixes: this demo was previously also named ``_lowercase`` (shadowing the
    converter above), called an undefined ``decimal_to_octal``, and the
    ``__main__`` guard called an undefined ``main``.
    """
    print('\n2 in octal is:')
    print(_lowercase(2))  # = 2
    print('\n8 in octal is:')
    print(_lowercase(8))  # = 10
    print('\n65 in octal is:')
    print(_lowercase(65))  # = 101
    print('\n216 in octal is:')
    print(_lowercase(216))  # = 330
    print('\n512 in octal is:')
    print(_lowercase(512))  # = 1000
    print('\n')


if __name__ == "__main__":
    main()
| 20 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case ( ProcessorMixin ):
    r"""
    Constructs a LayoutXLM processor, combining a LayoutLMv2 image processor and a LayoutXLM
    tokenizer into a single processor.

    The image processor resizes/rescales document images (and optionally applies OCR to obtain
    words and normalized bounding boxes); the tokenizer turns the words and boxes into
    token-level model inputs.

    Args:
        image_processor (`LayoutLMv2ImageProcessor`, *optional*):
            The image processor; required.
        tokenizer (`LayoutXLMTokenizer` or `LayoutXLMTokenizerFast`, *optional*):
            The tokenizer; required.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None  # legacy alias for `image_processor`
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Apply the image processor to `images`, then the tokenizer to the resulting (or
        caller-provided) words and boxes, returning a single `BatchEncoding` that also
        carries the processed images under the ``"image"`` key.

        Raises:
            ValueError: if `boxes`/`word_labels` are passed while the image processor runs its
                own OCR, or if overflowing tokens are requested without offset mappings.
        """
        # verify inputs: with apply_ocr the image processor supplies words/boxes itself
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True.")

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer; when OCR ran, the caller's `text` acts as the
        # question and the OCR'd words become `text_pair`
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values, duplicating images for overflowing token windows
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Map each overflowing encoding back to the image of the sample it came from."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Names of the inputs the model expects from this processor."""
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 477 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Byte-level BPE tokenization tests for RoBERTa (slow and fast tokenizers).'''

    # NOTE(review): all four class attributes below are bound to the same name
    # `lowercase`, so only the last assignment survives at class-creation time —
    # looks like an automated-renaming artifact (upstream these are distinct
    # tokenizer-mixin configuration attributes); confirm against the original test.
    lowercase = RobertaTokenizer
    lowercase = RobertaTokenizerFast
    lowercase = True
    lowercase = {"cls_token": "<s>"}

    def UpperCAmelCase ( self ):
        '''Write a minimal BPE vocab/merges pair plus special-tokens map to the temp dir.'''
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __UpperCamelCase = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        # NOTE(review): `__UpperCAmelCase` is read here but never bound in this method,
        # and every assignment targets the same `__UpperCamelCase` name — renaming
        # artifacts; verify against the upstream test before relying on this.
        __UpperCamelCase = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
        __UpperCamelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        __UpperCamelCase = {'unk_token': '<unk>'}

        __UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        __UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(__UpperCAmelCase ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(__UpperCAmelCase ) )

    def UpperCAmelCase ( self , **__UpperCAmelCase ):
        '''Build a slow tokenizer from the temp-dir fixtures (special-tokens map applied).'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def UpperCAmelCase ( self , **__UpperCAmelCase ):
        '''Build a fast (Rust) tokenizer from the temp-dir fixtures.'''
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def UpperCAmelCase ( self , __UpperCAmelCase ):
        '''Return an (input, expected output) text pair for the common round-trip tests.'''
        __UpperCamelCase = 'lower newer'
        __UpperCamelCase = 'lower newer'
        return input_text, output_text

    def UpperCAmelCase ( self ):
        '''Tokenize a simple string and check both tokens and their vocabulary ids.'''
        __UpperCamelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __UpperCamelCase = 'lower newer'
        __UpperCamelCase = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        __UpperCamelCase = tokenizer.tokenize(__UpperCAmelCase )  # , add_prefix_space=True)
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )

        __UpperCamelCase = tokens + [tokenizer.unk_token]
        __UpperCamelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )

    def UpperCAmelCase ( self ):
        '''Spot-check encoding of known strings against reference id sequences.'''
        __UpperCamelCase = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=__UpperCAmelCase ) , [0, 3_1414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=__UpperCAmelCase ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )

    @slow
    def UpperCAmelCase ( self ):
        '''Round-trip special-token insertion via build_inputs_with_special_tokens.'''
        __UpperCamelCase = self.tokenizer_class.from_pretrained('roberta-base' )

        __UpperCamelCase = tokenizer.encode('sequence builders' , add_special_tokens=__UpperCAmelCase )
        __UpperCamelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=__UpperCAmelCase )

        __UpperCamelCase = tokenizer.encode(
            'sequence builders' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
        __UpperCamelCase = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )

        __UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
        __UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def UpperCAmelCase ( self ):
        '''Check space/prefix handling around encoding and added special tokens.'''
        __UpperCamelCase = self.get_tokenizer()

        __UpperCamelCase = 'Encode this sequence.'
        __UpperCamelCase = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]

        # Testing encoder arguments
        __UpperCamelCase = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )

        __UpperCamelCase = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )

        tokenizer.add_special_tokens({'bos_token': '<s>'} )
        __UpperCamelCase = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )

        # Testing spaces after special tokens
        __UpperCamelCase = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )} )  # mask token has a left space
        __UpperCamelCase = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )

        __UpperCamelCase = 'Encode <mask> sequence'
        __UpperCamelCase = 'Encode <mask>sequence'

        __UpperCamelCase = tokenizer.encode(__UpperCAmelCase )
        __UpperCamelCase = encoded.index(__UpperCAmelCase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )

        __UpperCamelCase = tokenizer.encode(__UpperCAmelCase )
        __UpperCamelCase = encoded.index(__UpperCAmelCase )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )

    def UpperCAmelCase ( self ):
        '''Intentionally skipped: pretokenized inputs are covered elsewhere for RoBERTa.'''
        pass

    def UpperCAmelCase ( self ):
        '''Compare slow vs fast tokenizers on a sentence containing an added mask token.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
                __UpperCamelCase = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
                __UpperCamelCase = 'A, <mask> AllenNLP sentence.'
                __UpperCamelCase = tokenizer_r.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
                __UpperCamelCase = tokenizer_p.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )

                __UpperCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                __UpperCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )

                self.assertSequenceEqual(
                    __UpperCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    __UpperCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )

    def UpperCAmelCase ( self ):
        '''Check that add_prefix_space / trim_offsets survive a save/reload round-trip.'''
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
            __UpperCamelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            __UpperCamelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['add_prefix_space'] , __UpperCAmelCase )
            self.assertEqual(post_processor_state['add_prefix_space'] , __UpperCAmelCase )
            self.assertEqual(post_processor_state['trim_offsets'] , __UpperCAmelCase )

    def UpperCAmelCase ( self ):
        '''Verify offset mappings for every add_prefix_space / trim_offsets combination.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                # NOTE(review): `text_of_1_token` and later `text` are read without being
                # bound in this scope — renaming artifacts; verify against the upstream test.
                __UpperCamelCase = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                __UpperCamelCase = F'{text_of_1_token} {text_of_1_token}'

                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
                __UpperCamelCase = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )

                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
                __UpperCamelCase = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )

                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
                __UpperCamelCase = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )

                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
                __UpperCamelCase = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )

                __UpperCamelCase = F' {text}'

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #    encoding.offset_mapping[1],
                #    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
                __UpperCamelCase = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__UpperCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ) + 1, 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )

                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
                __UpperCamelCase = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )

                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
                    __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
                __UpperCamelCase = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
| 293 |
"""simple docstring"""
import argparse
import struct
import unittest
class __lowerCAmelCase :
    """Pure-Python SHA-256 (FIPS 180-4).

    Instantiating with a ``bytes`` message immediately computes the digest,
    exposed afterwards as the hex string attribute ``self.hash``.
    """

    def __init__( self , data ):
        """Store the message, seed the hash state, and compute the digest."""
        self.data = data
        # Initialize hash values: first 32 bits of the fractional parts of the
        # square roots of the first 8 primes (FIPS 180-4 section 5.3.3).
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]
        # Initialize round constants: first 32 bits of the fractional parts of
        # the cube roots of the first 64 primes.
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data )
        self.final_hash()

    @staticmethod
    def preprocessing( data ):
        """Pad the message: 0x80, zeros, then the 64-bit big-endian bit length,
        so the total length is a multiple of 64 bytes."""
        padding = b'\x80' + (b'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack('>Q' , (len(data ) * 8) )
        return data + padding + big_endian_integer

    def final_hash( self ):
        """Run the 64-round compression function over every 64-byte block."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' , block ) )
            # add 48 0-ed integers; entries 16..63 are filled in below
            words += [0] * 48
            a , b , c , d , e , f , g , h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    # (message-schedule expansion)
                    sa = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0x1_00_00_00_00
                # Compression
                sa = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                tempa = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                sb = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sb + maj) % 0x1_00_00_00_00
                # Rotate the working registers (a..h) for the next round.
                a , b , c , d , e , f , g , h = (
                    ((tempa + tempb) % 0x1_00_00_00_00),
                    a,
                    b,
                    c,
                    ((d + tempa) % 0x1_00_00_00_00),
                    e,
                    f,
                    g,
                )
            mutated_hash_values = [a , b , c , d , e , f , g , h]
            # Modify final values: fold this block's result into the running state.
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes )
            ]
        # Hex digest: eight 32-bit words, each zero-padded to 8 hex digits.
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )

    def ror( self , value , rotations ):
        """Right-rotate the 32-bit integer ``value`` by ``rotations`` bits."""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
# NOTE(review): this TestCase re-binds the module-level name `__lowerCAmelCase`,
# shadowing the SHA-256 class of the same name defined above.
class __lowerCAmelCase ( unittest.TestCase ):
    def UpperCAmelCase ( self ):
        '''Compare the pure-Python digest against hashlib for one input.

        NOTE(review): unittest only auto-discovers methods whose names start
        with ``test``, so this method never runs under discovery. `SHAaaa`
        and `hashlib.shaaaa` are not defined in this chunk — presumably
        mangled references to the hash class and ``hashlib.sha256``; confirm.
        '''
        import hashlib

        __UpperCamelCase = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHAaaa(__UpperCamelCase ).hash , hashlib.shaaaa(__UpperCamelCase ).hexdigest() )
def A ( ) -> None:
    """Execute this module's doctests via :mod:`doctest`."""
    import doctest

    doctest.testmod()
def main() -> None:
    """Hash a string or a file's contents with SHA-256 from the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument(
        '-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8' )
    # NOTE(review): `SHAaaa` is not defined in this chunk — presumably the
    # intended name of the SHA-256 class above; confirm before running.
    print(SHAaaa(hash_input ).hash )


if __name__ == "__main__":
    main()
| 293 | 1 |
"""Lazy import shim for the RoFormer model family (standard HF pattern)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps each submodule to the public names it defines; consumed by _LazyModule.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

# Each optional backend contributes its submodule only when it is importable.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Defer the heavy imports until an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 330 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make torch ops deterministic so the pipeline fast tests are reproducible.
enable_full_determinism()
class _UpperCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
    """Fast (tiny, CPU-sized) checks for the Kandinsky image-to-image pipeline."""

    # Attributes consumed by PipelineTesterMixin.
    pipeline_class = KandinskyImgaImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Build the full set of tiny sub-models the pipeline needs."""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs (embeddings, init image, generator)."""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        """End-to-end smoke test on CPU against a pinned output slice."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyImgaImgPipelineIntegrationTests(unittest.TestCase):
    """GPU integration test against reference outputs hosted on the Hub."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy")

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()

        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 330 | 1 |
"""simple docstring"""
import pprint
import requests
# Base URL of the ZenQuotes REST API; read by the fetch helpers below.
API_ENDPOINT_URL = 'https://zenquotes.io/api'
def quote_of_the_day() -> list:
    """Fetch today's quotes from the ZenQuotes ``/today`` endpoint."""
    return requests.get(API_ENDPOINT_URL + "/today" ).json()
def random_quotes() -> list:
    """Fetch a random quote from the ZenQuotes ``/random`` endpoint."""
    return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
    # Demo: fetch a random quote and pretty-print the raw API response.
    response = random_quotes()
    pprint.pprint(response)
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase (DiffusionPipeline ):
    '''
    Unconditional image generation with DDIM (https://arxiv.org/abs/2010.02502).

    Parameters:
        unet: the denoising UNet model.
        scheduler: any scheduler; its config is coerced into a DDIMScheduler.
    '''

    def __init__( self , unet , scheduler ) -> None:
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )

        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 50 , use_clipped_model_output : Optional[bool] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the reverse-diffusion loop and return the generated images."""
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )

        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 538 | 0 |
"""simple docstring"""
import numpy as np
def lowerCamelCase__ ( vector ) -> np.ndarray:
    """Element-wise logistic sigmoid, ``1 / (1 + exp(-x))``.

    Accepts any array-like input understood by numpy and returns an ndarray
    of the same shape, with every value in the open interval (0, 1).
    """
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 19 |
# Size of the input alphabet (extended ASCII), used as the rolling-hash base.
alphabet_size = 2_5_6
# Modulus to hash a string
modulus = 1_0_0_0_0_0_3
def rabin_karp(pattern: str, text: str, alphabet_size: int = 2_5_6, modulus: int = 1_0_0_0_0_0_3) -> bool:
    """Return True if ``pattern`` occurs in ``text`` (Rabin-Karp rolling hash).

    ``alphabet_size`` and ``modulus`` parameterize the rolling hash; the
    defaults match the module-level constants and rarely need changing.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        # On a hash hit, confirm with a direct comparison to rule out collisions.
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    """Self-checks for :func:`rabin_karp` covering matches and misses."""
    # Test 1)
    pattern = 'abc1abc12'
    text_match = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text_no_match = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern, text_match) and not rabin_karp(pattern, text_no_match)

    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern, text)

    # Test 5) non-ASCII characters participate via ord() like any other
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern, text)
    pattern = 'Lue'
    assert not rabin_karp(pattern, text)

    print('Success.')


if __name__ == "__main__":
    test_rabin_karp()
| 302 | 0 |
from collections.abc import Iterable
from typing import Any
class Node:
    """A binary-search-tree node holding a value and links to its neighbours."""

    def __init__(self, value=None):
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None

    def __repr__(self):
        """Leaf nodes print as their value; inner nodes as a nested mapping."""
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value )
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1 )
class BinarySearchTree:
    """An (unbalanced) binary search tree built from module-level ``Node`` objects."""

    def __init__(self, root=None):
        self.root = root

    def __str__(self):
        """Pretty-print the tree via the root node's repr."""
        return str(self.root )

    def __reassign_nodes(self, node, new_children):
        """Splice ``new_children`` into ``node``'s position (used by remove)."""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node):
        """Return True if ``node`` is its parent's right child."""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        return self.root is None

    def __insert(self, value):
        """Insert one value, keeping BST order (duplicates go right)."""
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values):
        for value in values:
            self.__insert(value )

    def search(self, value):
        """Return the node holding ``value`` (or None); raise on an empty tree."""
        if self.empty():
            raise IndexError('Warning: Tree is empty! please use another.' )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None):
        """Return the node with the largest value (rightmost), or None."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None):
        """Return the node with the smallest value (leftmost), or None."""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value):
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )
                node.value = (
                    tmp_node.value
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )

    def traversal_tree(self, traversal_function=None):
        """Traverse with ``traversal_function`` (default: preorder generator)."""
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )

    def inorder(self, arr, node):
        """Append the subtree's values to ``arr`` in sorted (inorder) order."""
        if node:
            self.inorder(arr, node.left )
            arr.append(node.value )
            self.inorder(arr, node.right )

    def find_kth_smallest(self, k, node):
        """Return the k-th smallest value (1-based) of the subtree at ``node``."""
        arr = []
        self.inorder(arr, node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node):
    """Return the nodes of the subtree rooted at ``curr_node`` in postorder
    (left, right, root); an empty subtree yields an empty list."""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def __UpperCamelCase():
    """Build a demo tree, exercise the public BST API, then empty the tree."""
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )

    # Prints all the elements of the list in order traversal
    print(t )

    if t.search(6 ) is not None:
        print('The value 6 exists' )
    else:
        print("The value 6 doesn't exist" )

    if t.search(-1 ) is not None:
        print('The value -1 exists' )
    else:
        print("The value -1 doesn't exist" )

    if not t.empty():
        print('Max Value: ' , t.get_max().value )  # type: ignore
        print('Min Value: ' , t.get_min().value )  # type: ignore

    for i in testlist:
        t.remove(i )
    print(t )
if __name__ == "__main__":
    import doctest

    # Run doctests (verbose) when executed as a script.
    doctest.testmod(verbose=True)
| 325 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer tests for GPTSAN-japanese."""

    # Attributes consumed by TokenizerTesterMixin.
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        self.special_tokens_map = {'unk_token': '<unk>'}

        # Write a tiny vocab and emoji table into the per-test temp directory.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'] )
        with open(self.vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        with open(self.emoji_file, 'w' ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs )

    def get_input_output_texts(self, tokenizer):
        # 㔺 is a variant form that the tokenizer normalizes to 世.
        input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text, add_special_tokens=False )
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False )
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = 'こんにちは、世界。 こんばんは、㔺界。'
        expected_token = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens, expected_token )

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(input_ids, expected_ids )

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens )
        self.assertListEqual(input_ids, expected_ids )

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization: <|bagoftoken|> expands to repeated tokens on decode.
        input_text = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        expected_text = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        tokens = tokenizer.encode(input_text )
        output_text = tokenizer.decode(tokens )
        self.assertEqual(output_text, expected_text )

    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )

        # Testing tokenization: the three prefix spellings must decode identically.
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        expected_text = 'こんにちは、世界。こんばんは、世界。😀'
        tokens_a = tokenizer.encode(prefix_text + input_text )
        tokens_b = tokenizer.encode('', prefix_text=prefix_text + input_text )
        tokens_c = tokenizer.encode(input_text, prefix_text=prefix_text )
        text_a = tokenizer.decode(tokens_a )
        text_b = tokenizer.decode(tokens_b )
        text_c = tokenizer.decode(tokens_c )
        self.assertEqual(text_a, expected_text )
        self.assertEqual(text_b, expected_text )
        self.assertEqual(text_c, expected_text )

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )

        # Testing tokenization: token_type_ids mark the prefix segment.
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'

        len_prefix = len(tokenizer.encode(prefix_text ) ) - 2
        len_text = len(tokenizer.encode(input_text ) ) - 2
        expected_mask_a = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_b = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_c = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_a = tokenizer(prefix_text + input_text ).token_type_ids
        type_id_b = tokenizer('', prefix_text=prefix_text + input_text ).token_type_ids
        type_id_c = tokenizer(input_text, prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(type_id_a, expected_mask_a )
        self.assertListEqual(type_id_b, expected_mask_b )
        self.assertListEqual(type_id_c, expected_mask_c )

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )

        x_token_a = tokenizer.encode('あンいワ' )
        x_token_b = tokenizer.encode('', prefix_text='あンいワ' )
        x_token_c = tokenizer.encode('いワ', prefix_text='あン' )

        self.assertEqual(tokenizer.decode(x_token_a ), tokenizer.decode(x_token_b ) )
        self.assertEqual(tokenizer.decode(x_token_a ), tokenizer.decode(x_token_c ) )
        self.assertNotEqual(x_token_a, x_token_b )
        self.assertNotEqual(x_token_a, x_token_c )
        self.assertEqual(x_token_b[1], x_token_b[-1] )  # SEG token
        self.assertEqual(x_token_b[1], x_token_c[3] )  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )

        input_pairs = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        x_token = tokenizer(input_pairs, padding=True )
        x_token_a = tokenizer.batch_encode_plus(input_pairs, padding=True )

        # fmt: off
        expected_outputs = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs )
        self.assertListEqual(x_token.token_type_ids, expected_typeids )
        self.assertListEqual(x_token.attention_mask, expected_attmask )
        self.assertListEqual(x_token_a.input_ids, expected_outputs )
        self.assertListEqual(x_token_a.token_type_ids, expected_typeids )
        self.assertListEqual(x_token_a.attention_mask, expected_attmask )

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 325 | 1 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# NOTE(review): the obfuscation rebinds all four module constants to one name
# (`A__`), so only the last assignment survives at runtime; the originals were
# presumably HIGHLIGHT_MESSAGE_PRE, HIGHLIGHT_MESSAGE_POST, TO_HIGHLIGHT and
# TO_CONVERT (the class below references those names) — verify before use.
# Marker inserted before lines that need manual review during conversion.
A__ : List[Any] = '''<<<<<<< This should probably be modified because it mentions: '''
# Marker inserted after such lines.
A__ : Optional[Any] = '''=======
>>>>>>>
'''
# TFDS symbols whose presence in a line should be flagged for manual review.
A__ : Tuple = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]
# Regex rewrites applied line-by-line to turn TFDS code into `datasets` code.
A__ : Tuple = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r'''tfds\.core''', r'''datasets'''),
    (r'''tf\.io\.gfile\.GFile''', r'''open'''),
    (r'''tf\.([\w\d]+)''', r'''datasets.Value(\'\1\')'''),
    (r'''tfds\.features\.Text\(\)''', r'''datasets.Value(\'string\')'''),
    (r'''tfds\.features\.Text\(''', r'''datasets.Value(\'string\'),'''),
    (r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''),
    (r'''tfds\.features\.FeaturesDict\(''', r'''dict('''),
    (r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (r'''tfds\.''', r'''datasets.'''),
    (r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''),
    (r'''self\.builder_config''', r'''self.config'''),
]
def a_ ( args : Namespace ) -> "snake_case__":
    """Factory wired into ``argparse`` via ``set_defaults(func=...)``.

    Fix: the obfuscated version took a parameter it never used and returned an
    undefined ``ConvertCommand`` from an undefined ``args``; the command class
    defined in this module is ``snake_case__``, so instantiate that.
    """
    return snake_case__(args.tfds_path ,args.datasets_directory )
class snake_case__ ( __a ):
    """`datasets-cli convert`: rewrite TensorFlow Datasets scripts into
    HuggingFace Datasets scripts, copying shared utility files afterwards.

    NOTE(review): local and parameter names in this block are obfuscated
    (`__snake_case`, `lowerCAmelCase__`, duplicated `__a` parameters) and
    several references look inconsistent (e.g. `parser` vs `train_parser`,
    `lambda __a : e in out_line`); compare against the upstream `datasets`
    ConvertCommand before relying on any single line.
    """
    @staticmethod
    def A_ ( __a : ArgumentParser ) -> Any:
        """Register the `convert` sub-command and its CLI options."""
        __snake_case : int = parser.add_parser(
            'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
        train_parser.add_argument(
            '--tfds_path' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
        train_parser.add_argument(
            '--datasets_directory' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='Path to the HuggingFace Datasets folder.' )
        train_parser.set_defaults(func=lowerCAmelCase__ )
    def __init__( self : List[str] , __a : str , __a : str , *__a : Any ) -> int:
        """Store the source TFDS path and destination datasets directory."""
        __snake_case : List[str] = get_logger('datasets-cli/converting' )
        __snake_case : int = tfds_path
        __snake_case : Union[str, Any] = datasets_directory
    def A_ ( self : Tuple ) -> int:
        """Run the conversion: rewrite each eligible .py file line-by-line via
        TO_CONVERT, flag TO_HIGHLIGHT matches for manual review, group builder
        scripts into per-dataset directories and copy utility modules last."""
        if os.path.isdir(self._tfds_path ):
            __snake_case : Optional[int] = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            __snake_case : Tuple = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
        __snake_case : str = os.path.abspath(self._datasets_directory )
        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        __snake_case : int = []
        __snake_case : List[str] = []
        __snake_case : str = {}
        if os.path.isdir(self._tfds_path ):
            __snake_case : Tuple = os.listdir(lowerCAmelCase__ )
        else:
            __snake_case : List[str] = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f'''Looking at file {f_name}''' )
            __snake_case : Dict = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
            __snake_case : Tuple = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
            if not os.path.isfile(lowerCAmelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('Skipping file' )
                continue
            with open(lowerCAmelCase__ , encoding='utf-8' ) as f:
                __snake_case : Any = f.readlines()
            __snake_case : Any = []
            __snake_case : str = False
            __snake_case : Union[str, Any] = False
            __snake_case : Optional[int] = []
            for line in lines:
                __snake_case : str = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    __snake_case : str = 'import datasets\n'
                elif "import tensorflow" in out_line:
                    # order is important here
                    __snake_case : Optional[Any] = ''
                    continue
                elif "from absl import logging" in out_line:
                    __snake_case : Dict = 'from datasets import logging\n'
                elif "getLogger" in out_line:
                    __snake_case : Any = out_line.replace('getLogger' , 'get_logger' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Flag the line for a human: wrap it in highlight markers.
                    __snake_case : Optional[Any] = True
                    __snake_case : Union[str, Any] = list(filter(lambda __a : e in out_line , lowerCAmelCase__ ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase__ ) + '\n' )
                    out_lines.append(lowerCAmelCase__ )
                    out_lines.append(lowerCAmelCase__ )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        __snake_case : Optional[Any] = re.sub(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    __snake_case : Optional[Any] = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase__ )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
                    __snake_case : Tuple = 'from . import ' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'''Error converting {out_line.strip()}''' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    __snake_case : Any = True
                out_lines.append(lowerCAmelCase__ )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                __snake_case : Optional[int] = f_name.replace('.py' , '' )
                __snake_case : List[str] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
                __snake_case : Optional[Any] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
                os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
                self._logger.info(f'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(lowerCAmelCase__ )
            if needs_manual_update:
                with_manual_update.append(lowerCAmelCase__ )
            with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as f:
                f.writelines(lowerCAmelCase__ )
            self._logger.info(f'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                __snake_case : Optional[Any] = os.path.basename(lowerCAmelCase__ )
                __snake_case : Optional[int] = imports_to_builder_map[f_name.replace('.py' , '' )]
                self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(lowerCAmelCase__ , lowerCAmelCase__ )
            except KeyError:
                self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 286 |
from __future__ import annotations
import math
def _lowercase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : bool , __UpperCamelCase : list[int] , __UpperCamelCase : float ):
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if not scores:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , )
)
def _lowercase ( ):
    """Demo entry point: print the optimal minimax value for a sample tree.

    Fix: the obfuscated body bound every local to one name and then passed
    undefined ``__UpperCamelCase`` placeholders; locals are restored and the
    root call starts with the maximizing player.
    """
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    # NOTE(review): `minimax` must resolve at module level; the helper above is
    # currently also named `_lowercase` after obfuscation — confirm the name.
    print(F'''Optimal value : {minimax(0 , 0 , True , scores , height )}''' )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is not defined at module level — both functions
    # above were renamed to `_lowercase` by obfuscation; verify before running.
    main()
| 214 | 0 |
import math
import qiskit
def lowercase ( input_a: int = 1 , input_a_second: int = 1 , carry_in: int = 1 ):
    """Build and simulate a quantum full adder for two input bits and a carry.

    Inputs of 2 put the corresponding qubit in superposition (Hadamard).
    Returns the measurement counts dict from 1000 simulator shots.

    Raises:
        TypeError: if any input is a string.
        ValueError: if any input is negative, non-integral, or greater than 2.

    Fix: the obfuscated signature reused one parameter name three times (a
    SyntaxError) and the validation used ``isinstance(a, a)`` (a TypeError at
    runtime); parameters and the loop index are restored per body usage.
    """
    if (
        isinstance(input_a , str )
        or isinstance(input_a_second , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError("inputs must be integers." )
    if (input_a < 0) or (input_a_second < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive." )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_a_second ) != input_a_second)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError("inputs must be exact integers." )
    if (input_a > 2) or (input_a_second > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2." )
    # build registers: 4 qubits (3 inputs + ancilla), 2 classical bits (sum, carry)
    quantum_register = qiskit.QuantumRegister(4 , "qr" )
    classical_register = qiskit.ClassicalRegister(2 , "cr" )
    # list the entries
    entry = [input_a, input_a_second, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(quantum_register , classical_register )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , classical_register )  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator" )
    job = qiskit.execute(quantum_circuit , backend , shots=1000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
    # NOTE(review): `quantum_full_adder` is not defined in this module — the
    # adder above was renamed to `lowercase` by obfuscation; verify the name.
    print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 709 |
# NOTE(review): all three constants share one obfuscated name, so only the
# last binding survives; the originals were presumably INSTALL_CONTENT,
# notebook_first_cells and black_avoid_patterns — verify against callers.
# Bootstrap cell injected at the top of auto-generated documentation notebooks.
SCREAMING_SNAKE_CASE__ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# First cell of every generated notebook (references INSTALL_CONTENT above).
SCREAMING_SNAKE_CASE__ = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder -> dummy-class substitutions applied when rendering doc templates.
SCREAMING_SNAKE_CASE__ = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 140 | 0 |
"""Lazy import structure for the XGLM model family (config, tokenizers, PT/Flax/TF)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule -> exported names; consumed by _LazyModule below.
# Fix: the obfuscated original rebound every optional piece to one throwaway
# name and then referenced an undefined `_import_structure` at the bottom,
# discarding the `_LazyModule` instead of installing it in sys.modules.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand,
    # matching the pattern used by the other model __init__ files here.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
# Make the shared test_module package importable before importing from it.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
# Fixture locations used by the tests below.
# NOTE(review): all three constants share one obfuscated name, so only the
# last binding survives at runtime — verify against the upstream test file.
lowerCAmelCase :Optional[int] = get_tests_dir('''fixtures''')
lowerCAmelCase :Any = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
lowerCAmelCase :Tuple = get_tests_dir('''fixtures/dummy-config.json''')
class _lowerCamelCase ( unittest.TestCase ):
    """AutoFeatureExtractor tests: hub/local loading, error messages,
    trust_remote_code behaviour and custom-class registration.

    NOTE(review): the obfuscation replaced every concrete argument with `_A`
    (paths, booleans, expected classes), so most calls below cannot run as
    written — compare with the upstream transformers test file.
    """
    # setUp: reset per-test state.
    def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
        __magic_name__ : str = 0
    # Load a feature extractor by hub model id.
    def __lowerCAmelCase ( self : int ) -> Tuple:
        __magic_name__ : List[str] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
        self.assertIsInstance(_A , _A )
    # Load from a local config path.
    def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
        __magic_name__ : List[str] = AutoFeatureExtractor.from_pretrained(_A )
        self.assertIsInstance(_A , _A )
    # A plain model config.json (without feature_extractor_type) is enough.
    def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
        with tempfile.TemporaryDirectory() as tmpdirname:
            __magic_name__ : Union[str, Any] = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            __magic_name__ : Dict = AutoFeatureExtractor.from_pretrained(_A ).to_dict()
            config_dict.pop('feature_extractor_type' )
            __magic_name__ : int = WavaVecaFeatureExtractor(**_A )
            # save in new folder
            model_config.save_pretrained(_A )
            config.save_pretrained(_A )
            __magic_name__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_A )
            # make sure private variable is not incorrectly saved
            __magic_name__ : List[str] = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )
            self.assertIsInstance(_A , _A )
    # Load directly from a config file path.
    def __lowerCAmelCase ( self : int ) -> Union[str, Any]:
        __magic_name__ : Tuple = AutoFeatureExtractor.from_pretrained(_A )
        self.assertIsInstance(_A , _A )
    # Invalid model identifier raises a helpful error.
    def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
        with self.assertRaisesRegex(
            _A , 'bert-base is not a local folder and is not a valid model identifier' ):
            __magic_name__ : str = AutoFeatureExtractor.from_pretrained('bert-base' )
    # Invalid revision raises a helpful error.
    def __lowerCAmelCase ( self : Any ) -> Tuple:
        with self.assertRaisesRegex(
            _A , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            __magic_name__ : Tuple = AutoFeatureExtractor.from_pretrained(_A , revision='aaaaaa' )
    # Repo without a preprocessor config raises a helpful error.
    def __lowerCAmelCase ( self : Dict ) -> str:
        with self.assertRaisesRegex(
            _A , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
            __magic_name__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
    # trust_remote_code gating for dynamically-defined feature extractors.
    def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(_A ):
            __magic_name__ : Dict = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_A ):
            __magic_name__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A )
        __magic_name__ : List[Any] = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A )
        self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(_A )
            __magic_name__ : List[Any] = AutoFeatureExtractor.from_pretrained(_A , trust_remote_code=_A )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
    # Registering custom config/feature-extractor classes with the auto-API.
    def __lowerCAmelCase ( self : str ) -> Tuple:
        try:
            AutoConfig.register('custom' , _A )
            AutoFeatureExtractor.register(_A , _A )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_A ):
                AutoFeatureExtractor.register(_A , _A )
            # Now that the config is registered, it can be used as any other config with the auto-API
            __magic_name__ : str = CustomFeatureExtractor.from_pretrained(_A )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(_A )
                __magic_name__ : List[Any] = AutoFeatureExtractor.from_pretrained(_A )
            self.assertIsInstance(_A , _A )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    # Local registration takes precedence over hub code unless remote is forced.
    def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
        class _lowerCamelCase ( lowercase__ ):
            """Local stand-in used to check registration precedence."""
            A_ : Any = True
        try:
            AutoConfig.register('custom' , _A )
            AutoFeatureExtractor.register(_A , _A )
            # If remote code is not set, the default is to use local
            __magic_name__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            __magic_name__ : str = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            __magic_name__ : Tuple = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(not hasattr(_A , 'is_local' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
# NOTE(review): many distinct module constants below were rebound to a single
# obfuscated name (`lowerCAmelCase_`); the originals were presumably
# _torch_available, torch_cache_home, default_cache_path, CONFIG, ATTRIBUTES,
# OBJECTS, PYTORCH_PRETRAINED_BERT_CACHE, etc. — verify against callers.
try:
    import torch

    lowerCAmelCase_: List[str] = True
except ImportError:
    lowerCAmelCase_: Dict = False
# Resolve the torch cache directory, falling back to XDG conventions.
try:
    from torch.hub import _get_torch_home

    lowerCAmelCase_: Dict = _get_torch_home()
except ImportError:
    lowerCAmelCase_: Union[str, Any] = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
lowerCAmelCase_: List[str] = os.path.join(torch_cache_home, "transformers")
# Download endpoints and bundled label/config file locations.
lowerCAmelCase_: Dict = "https://cdn.huggingface.co"
lowerCAmelCase_: List[str] = "https://s3.amazonaws.com/models.huggingface.co/bert"
lowerCAmelCase_: str = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
lowerCAmelCase_: Any = os.path.join(PATH, "config.yaml")
lowerCAmelCase_: Union[str, Any] = os.path.join(PATH, "attributes.txt")
lowerCAmelCase_: Dict = os.path.join(PATH, "objects.txt")
# Cache directories, overridable via environment variables.
lowerCAmelCase_: int = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
lowerCAmelCase_: List[str] = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
lowerCAmelCase_: Optional[int] = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
lowerCAmelCase_: Any = "pytorch_model.bin"
lowerCAmelCase_: Optional[int] = "config.yaml"
def __a ( A=OBJECTS , A=ATTRIBUTES ):
'''simple docstring'''
lowercase__ = []
with open(A ) as f:
for object in f.readlines():
vg_classes.append(object.split("," )[0].lower().strip() )
lowercase__ = []
with open(A ) as f:
for object in f.readlines():
vg_attrs.append(object.split("," )[0].lower().strip() )
return vg_classes, vg_attrs
def __a ( A ):
'''simple docstring'''
lowercase__ = OrderedDict()
with open(A , "rb" ) as f:
lowercase__ = pkl.load(A )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
lowercase__ = ckp.pop(A )
if isinstance(A , np.ndarray ):
lowercase__ = torch.tensor(A )
else:
assert isinstance(A , torch.tensor ), type(A )
lowercase__ = v
return r
class a__ :
    """Nested, attribute-style configuration container loadable from YAML.

    NOTE(review): parameter names were obfuscated to `_UpperCAmelCase`
    (duplicated in several signatures, which is not valid Python) and locals
    to `lowercase__`; compare with the upstream visual_bert `utils.Config`
    when reading individual lines.
    """
    snake_case_ = {}
    def __init__( self, _UpperCAmelCase, _UpperCAmelCase = "root", _UpperCAmelCase=0 ):
        """Recursively wrap a plain dict; nested dicts become child Configs."""
        lowercase__ = name
        lowercase__ = level
        lowercase__ = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            lowercase__ = copy.deepcopy(_UpperCAmelCase )
            lowercase__ = copy.deepcopy(_UpperCAmelCase )
            if isinstance(_UpperCAmelCase, _UpperCAmelCase ):
                lowercase__ = Config(_UpperCAmelCase, name=_UpperCAmelCase, level=level + 1 )
            lowercase__ = v
            setattr(self, _UpperCAmelCase, _UpperCAmelCase )
            lowercase__ = d
    def __repr__( self ):
        """Show the top-level keys of this config node."""
        return str(list((self._pointer.keys()) ) )
    def __setattr__( self, _UpperCAmelCase, _UpperCAmelCase ):
        """Set an attribute, supporting dotted keys that descend into children."""
        lowercase__ = val
        lowercase__ = val
        lowercase__ = key.split("." )
        lowercase__ = len(_UpperCAmelCase ) - 1
        lowercase__ = self._pointer
        if len(_UpperCAmelCase ) > 1:
            for i, l in enumerate(_UpperCAmelCase ):
                if hasattr(self, _UpperCAmelCase ) and isinstance(getattr(self, _UpperCAmelCase ), _UpperCAmelCase ):
                    setattr(getattr(self, _UpperCAmelCase ), ".".join(levels[i:] ), _UpperCAmelCase )
                if l == last_level:
                    lowercase__ = val
                else:
                    lowercase__ = pointer[l]
    def snake_case__ ( self ):
        """Return the underlying pointer dict."""
        return self._pointer
    def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
        """Serialize this config to a YAML file."""
        with open(F'''{file_name}''', "w" ) as stream:
            dump(_UpperCAmelCase, _UpperCAmelCase )
    def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
        """Serialize this config to a JSON file."""
        with open(F'''{file_name}''', "w" ) as stream:
            json.dump(_UpperCAmelCase, _UpperCAmelCase )
    @staticmethod
    def snake_case__ ( _UpperCAmelCase ):
        """Parse a YAML file and return its raw data."""
        with open(_UpperCAmelCase ) as stream:
            lowercase__ = load(_UpperCAmelCase, Loader=_UpperCAmelCase )
        return data
    def __str__( self ):
        """Pretty-print the config tree with two-space indentation per level."""
        lowercase__ = " "
        if self._name != "root":
            lowercase__ = F'''{t * (self._level-1)}{self._name}:\n'''
        else:
            lowercase__ = ""
        lowercase__ = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(_UpperCAmelCase, _UpperCAmelCase ):
                r += F'''{t * (self._level)}{v}\n'''
                self._level += 1
            else:
                r += F'''{t * (self._level)}{k}: {v} ({type(_UpperCAmelCase ).__name__})\n'''
            lowercase__ = level
        return r[:-1]
    @classmethod
    def snake_case__ ( cls, _UpperCAmelCase, **_UpperCAmelCase ):
        """Alternate constructor: resolve a pretrained identifier to a Config."""
        lowercase__ , lowercase__ = cls.get_config_dict(_UpperCAmelCase, **_UpperCAmelCase )
        return cls(_UpperCAmelCase )
    @classmethod
    def snake_case__ ( cls, _UpperCAmelCase, **_UpperCAmelCase ):
        """Resolve a local path / file / remote URL to a cached config file and
        load it as YAML; returns (config, leftover_kwargs)."""
        lowercase__ = kwargs.pop("cache_dir", _UpperCAmelCase )
        lowercase__ = kwargs.pop("force_download", _UpperCAmelCase )
        lowercase__ = kwargs.pop("resume_download", _UpperCAmelCase )
        lowercase__ = kwargs.pop("proxies", _UpperCAmelCase )
        lowercase__ = kwargs.pop("local_files_only", _UpperCAmelCase )
        if os.path.isdir(_UpperCAmelCase ):
            lowercase__ = os.path.join(_UpperCAmelCase, _UpperCAmelCase )
        elif os.path.isfile(_UpperCAmelCase ) or is_remote_url(_UpperCAmelCase ):
            lowercase__ = pretrained_model_name_or_path
        else:
            lowercase__ = hf_bucket_url(_UpperCAmelCase, filename=_UpperCAmelCase, use_cdn=_UpperCAmelCase )
        try:
            # Load from URL or cache if already cached
            lowercase__ = cached_path(
                _UpperCAmelCase, cache_dir=_UpperCAmelCase, force_download=_UpperCAmelCase, proxies=_UpperCAmelCase, resume_download=_UpperCAmelCase, local_files_only=_UpperCAmelCase, )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            lowercase__ = Config.load_yaml(_UpperCAmelCase )
        except EnvironmentError:
            lowercase__ = "Can't load config for"
            raise EnvironmentError(_UpperCAmelCase )
        if resolved_config_file == config_file:
            print("loading configuration file from path" )
        else:
            print("loading configuration file cache" )
        return Config.load_yaml(_UpperCAmelCase ), kwargs
def __a ( A ):
    """Compare tensor *A* element-wise against the tensor saved in dump.pt.

    Prints both shapes and first elements; asserts closeness within
    rtol=0.01 / atol=0.1, then raises to halt with a success message
    (debug helper — it never returns normally).

    Fix: the obfuscated body referenced an undefined ``in_tensor`` instead of
    the parameter, and never bound the loaded/converted arrays to the names it
    used.
    """
    out_tensor = torch.load("dump.pt" , map_location=A.device )
    na_in = A.numpy()
    na_out = out_tensor.numpy()[0]
    print(na_in.shape , na_in[0, 0, :5] )
    print(na_out.shape , na_out[0, 0, :5] )
    # NOTE(review): `x is False` on numpy bools is always False, so the
    # mismatch percentage in this message always reads 0 — kept as upstream.
    assert np.allclose(na_in , na_out , rtol=0.01 , atol=0.1 ), (
        f'''{sum([1 for x in np.isclose(na_in , na_out , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na_in.flatten() )*1_00:.4f} %'''
        " element-wise mismatch"
    )
    raise Exception("tensors are all good" )
# Hugging face functions below
def __a ( A ):
'''simple docstring'''
lowercase__ = urlparse(A )
return parsed.scheme in ("http", "https")
def __a ( model_id , filename , use_cdn=True ):
    """Return the canonical download URL for *filename* of *model_id*.

    Legacy (flat) model ids without "/" use "<endpoint>/<id>-<file>", namespaced
    ids use "<endpoint>/<id>/<file>".

    Fix: the obfuscated signature reused the parameter name `A` (SyntaxError)
    and a metadata placeholder "(unknown)" had replaced the filename component
    of both returned URLs.
    """
    # NOTE(review): the endpoint constants below were rebound to an obfuscated
    # module name; confirm CLOUDFRONT_DISTRIB_PREFIX / S3_BUCKET_PREFIX resolve.
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f'''{endpoint}/{model_id}-{filename}'''
    else:
        return f'''{endpoint}/{model_id}/{filename}'''
def __a ( A , A , A=None , A=0 , A=None , ):
'''simple docstring'''
lowercase__ = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(A , A ):
ua += "; " + "; ".join("{}/{}".format(A , A ) for k, v in user_agent.items() )
elif isinstance(A , A ):
ua += "; " + user_agent
lowercase__ = {"user-agent": ua}
if resume_size > 0:
lowercase__ = "bytes=%d-" % (resume_size,)
lowercase__ = requests.get(A , stream=A , proxies=A , headers=A )
if response.status_code == 4_16: # Range not satisfiable
return
lowercase__ = response.headers.get("Content-Length" )
lowercase__ = resume_size + int(A ) if content_length is not None else None
lowercase__ = tqdm(
unit="B" , unit_scale=A , total=A , initial=A , desc="Downloading" , )
for chunk in response.iter_content(chunk_size=10_24 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(A ) )
temp_file.write(A )
progress.close()
def __a ( url , cache_dir=None , force_download=False , proxies=None , etag_timeout=10 , resume_download=False , user_agent=None , local_files_only=False , ):
    """Download *url* into the cache (unless already present) and return the
    local cached path, or None when offline with no cached copy.

    The cache key is derived from the URL and server ETag; downloads are
    serialized per file with a FileLock and can resume partial transfers.

    Fix: the obfuscated signature reused the parameter name `A` seven times
    (a SyntaxError) and passed `A` placeholders throughout the body;
    parameters and call arguments are restored per body usage.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 2_00:
                etag = response.headers.get("ETag" )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url , etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + ".*" )
                if not file.endswith(".json" ) and not file.endswith(".lock" )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False." )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path , "a+b" ) as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s" , url , temp_file.name , )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        os.replace(temp_file.name , cache_path )
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path , "w" ) as meta_file:
            json.dump(meta , meta_file )
    return cache_path
def __a ( A , A=None ):
'''simple docstring'''
lowercase__ = url.encode("utf-8" )
lowercase__ = shaaaa(A )
lowercase__ = url_hash.hexdigest()
if etag:
lowercase__ = etag.encode("utf-8" )
lowercase__ = shaaaa(A )
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5" ):
filename += ".h5"
return filename
def __a(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    """Resolve ``url_or_filename`` to a local path, downloading to the cache if needed.

    Remote URLs go through ``get_from_cache``; existing local paths are returned
    as-is. With ``extract_compressed_file`` a zip/tar archive is extracted next to
    itself into a "<name>-extracted" directory (guarded by a file lock so parallel
    extractions do not race).

    Raises EnvironmentError for a missing local file or an unrecognized archive,
    and ValueError when the input is neither a URL nor a local path.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted

    return output_path
def __a(query, delim=","):
    """Load data either from a local file path or from a URL.

    Local files are read and passed through eval(); remote targets are fetched
    with ``requests`` and parsed as JSON, falling back to eval() of the decoded
    body, then to a plain line split.

    NOTE(review): eval() on file/remote contents is only safe for trusted
    sources — do not feed this untrusted input.
    """
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            # fix: was requests.json() (module has no such attribute)
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def __a(url):
    """Fetch an image over HTTP and return it as a numpy array (H, W, C)."""
    response = requests.get(url)
    # fix: the response object was bound to a throwaway name, leaving
    # `response` undefined on the next line.
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def __a(url):
    """Download a pickled Detectron-style checkpoint (unless already in cwd) and
    convert its "model" weights to torch tensors.

    Every "running_var" key additionally gets a zeroed "num_batches_tracked"
    sibling entry, which torch BatchNorm layers expect.
    """
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def __a():
    """Print the absolute path of demo.ipynb located next to this module."""
    # fix: the original interpolated an undefined name; the path is anchored
    # at this module's own file.
    print(f"{os.path.abspath(os.path.join(__file__, os.pardir))}/demo.ipynb")
def __a(im, input_format="RGB"):
    """Load an image from a local path or URL into a numpy array.

    cv2 decodes local files as BGR; the array is converted to RGB, then the
    channel order is flipped back when the caller asks for "RGB" input format
    (mirroring the original frcnn preprocessing behavior).
    """
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def __a(images, batch=1):
    """Yield successive slices of ``images`` of length ``batch`` (the last one
    may be shorter)."""
    return (images[i : i + batch] for i in range(0, len(images), batch))
| 668 | """simple docstring"""
# Standard base64 alphabet (RFC 4648). The encoder below referenced this
# constant by name while it was bound to a throwaway variable.
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# Backwards-compatible alias for the previous binding.
lowerCAmelCase_ = B64_CHARSET


def __a(data):
    """Encode a bytes-like object to base64, returning bytes.

    Pure-Python equivalent of base64.b64encode: the input is expanded to a bit
    string, padded to a multiple of 6 bits, mapped through the base64 alphabet,
    and suffixed with "=" padding characters.

    Raises TypeError if ``data`` is not bytes.
    """
    if not isinstance(data, bytes):
        message = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(message)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The "=" padding that will be appended to the output.
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append arbitrary 0 bits so the stream length is a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character.
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def __a(encoded_data):
    """Decode base64 ``encoded_data`` (bytes or ASCII str) back to bytes.

    Pure-Python equivalent of base64.b64decode with explicit validation of the
    alphabet and the "=" padding. Wrong input types raise TypeError, non-ASCII
    bytes raise ValueError, and invalid characters/padding raise AssertionError
    (preserving the original contract).
    """
    # Standard base64 alphabet, defined locally so this function does not rely
    # on a module-level constant that may be missing.
    B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"

    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        message = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(message)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one, and drop the filler bits it stands for.
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 668 | 1 |
from math import factorial
def _A(n: int = 20) -> int:
    """Project Euler 15: count lattice paths through an n x n grid.

    The answer is the central binomial coefficient C(2n, n). Integer floor
    division replaces the original float division, which loses precision for
    large n.
    """
    total_steps = 2 * n  # n moves right + n moves down
    k = total_steps // 2  # middle entry of the (2n)-th Pascal row
    return factorial(total_steps) // (factorial(k) * factorial(total_steps - k))
if __name__ == "__main__":
    import sys

    # With no argument, solve the default 20x20 grid; otherwise use argv[1].
    # fix: the original called undefined names `solution` and `n`.
    if len(sys.argv) == 1:
        print(_A(20))
    else:
        try:
            n = int(sys.argv[1])
            print(_A(n))
        except ValueError:
            print('Invalid entry - please enter a number.')
| 61 |
'''simple docstring'''
import copy
import re
class lowerCamelCase__:
    """Build short, reversible names for hyperparameter trial configurations.

    A trial name is PREFIX plus one shortened "key value" token per parameter
    whose value differs from DEFAULTS; ``parse_repr`` inverts ``shortname``.
    The method/attribute names were restored because the class body calls them
    by these names (they previously all collided on one obfuscated name).
    """

    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        """Set the naming prefix and parameter defaults, then build the tables."""
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        """Return (and memoize) the shortest unused prefix of ``word``."""
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback: every prefix is taken, so append "#<digits>"
            # with an increasing counter until an unused name is found.
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # fix: the original never advanced, looping forever
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        """Shorten a full parameter name, preferring no separator between parts."""
        words = param_name.split("_")
        shortname_parts = [lowerCamelCase__.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        """Register ``param_name`` in the forward/reverse lookup tables."""
        short_name = lowerCamelCase__.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        """Build the naming tables from DEFAULTS once (no-op if already built)."""
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        """Encode ``params`` as a compact trial name; only non-defaults appear."""
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            # Numbers are glued to the key; other values use a "-" separator.
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        """Decode a trial name produced by ``shortname`` back into a params dict."""
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                # Separatorless numeric token: split the alpha key from digits.
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
return parameters
| 566 | 0 |
"""simple docstring"""
def lowerCAmelCase(number):
    """Return True if ``number`` is even (its lowest bit is 0), else False."""
    # fix: the parameter was misnamed, leaving `number` undefined.
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 194 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Import structure handed to _LazyModule: submodules are only imported on
# first attribute access. (fix: the dict was bound to a throwaway name, the
# torch branch replaced it instead of adding a key, and the lazy proxy was
# never installed into sys.modules.)
_import_structure = {
    'configuration_table_transformer': [
        'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TableTransformerConfig',
        'TableTransformerOnnxConfig',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available, so the modeling objects can be exposed as well.
    _import_structure['modeling_table_transformer'] = [
        'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TableTransformerForObjectDetection',
        'TableTransformerModel',
        'TableTransformerPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so imports only resolve on use.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 194 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# fix: these constants are referenced by name (VOCAB_FILES_NAMES /
# PRETRAINED_VOCAB_FILES_MAP) later in this file but were bound to throwaway
# variables, leaving the names undefined.
VOCAB_FILES_NAMES = {'''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''tokenizer_file''': {
        '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
        '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
        '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
        '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
        '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
        '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
        '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
    },
}
class UpperCamelCase__(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BLOOM tokenizer.

    NOTE(review): restored from an obfuscated original in which the base class
    was an undefined name and all attribute/method names collided; it mirrors
    transformers' BloomTokenizerFast — confirm against upstream.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        # Make the backend pre-tokenizer honor add_prefix_space if it differs
        # from what the serialized tokenizer was built with.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type'''))
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # Pretokenized input only round-trips correctly with add_prefix_space.
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                ''' pretokenized inputs.'''
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                ''' pretokenized inputs.'''
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Delegate to the backend tokenizer model's save()."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate every conversation turn, each terminated by EOS, and
        keep only the most recent model_max_length tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 521 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fix: the conversion functions below reference PREFIX and MODEL_MAPPING by
# name, but both were bound to throwaway variables.
# Base URL of the OpenAI Jukebox checkpoint store.
PREFIX = '''https://openaipublic.azureedge.net/jukebox/models/'''
# Checkpoint shards making up each model (vqvae + three prior levels).
MODEL_MAPPING = {
    '''jukebox-1b-lyrics''': [
        '''5b/vqvae.pth.tar''',
        '''5b/prior_level_0.pth.tar''',
        '''5b/prior_level_1.pth.tar''',
        '''1b_lyrics/prior_level_2.pth.tar''',
    ],
    '''jukebox-5b-lyrics''': [
        '''5b/vqvae.pth.tar''',
        '''5b/prior_level_0.pth.tar''',
        '''5b/prior_level_1.pth.tar''',
        '''5b_lyrics/prior_level_2.pth.tar''',
    ],
}
def lowerCAmelCase__(key):
    """Map an OpenAI Jukebox state-dict key to its transformers equivalent.

    Positional conv renames are applied first, then prior/conditioner renames;
    keys matching no rule pass through unchanged.
    """
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key


# Descriptive alias: fix_jukebox_keys below calls this helper as replace_key.
replace_key = lowerCAmelCase__
def lowerCAmelCase__(state_dict, model_state_dict, key_prefix, mapping):
    """Rename every key of ``state_dict`` to the transformers layout.

    ``model_state_dict`` is the target model's state dict (used to verify the
    renamed key exists and its shape matches); ``key_prefix`` scopes the lookup
    (e.g. "vqvae" or "priors.N"); ``mapping`` records new_key -> original_key.
    Returns the renamed state dict.
    """
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


# Descriptive alias used by the conversion entry point below.
fix_jukebox_keys = lowerCAmelCase__
@torch.no_grad()
def lowerCAmelCase__(model_name=None, pytorch_dump_folder_path=None):
    """Download the Jukebox shards for ``model_name``, remap their keys and load
    them into a transformers JukeboxModel saved at ``pytorch_dump_folder_path``.

    Also writes a mapping.json recording new_key -> original_key for every
    converted weight. Returns the remaining (prior) weight dicts.
    """
    # Fetch any missing checkpoint shard into the dump folder.
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        # Normalize the raw checkpoint key suffixes before the structural remap.
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict


# Descriptive alias used by the __main__ block below.
convert_openai_checkpoint = lowerCAmelCase__
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''jukebox-5b-lyrics''',
        type=str,
        help='''Name of the model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''jukebox-5b-lyrics-converted''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    # fix: parse_args() was bound to a throwaway name, leaving `args` undefined.
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 568 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCAmelCase_:
    """Helper that builds a small MBart config plus input batch for the TF tests.

    Method/attribute names were restored because the body reads them by these
    names (self.config_cls, self.config_updates, prepare_config_and_inputs_for_common).
    """

    config_cls = MBartConfig
    config_updates = {}
    hidden_act = '''gelu'''

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Build a config plus an inputs dict whose encoder input ends in EOS."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        # NOTE(review): this check appears truncated in the source file; only
        # the first forward pass with cache is exercised.
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


# Descriptive alias used by the model-test class below.
TFMBartModelTester = UpperCAmelCase_
def lowercase(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default attention/head masks for an MBart TF forward pass.

    Missing attention masks are derived from the pad token; missing head masks
    default to all-ones of the appropriate (layers, heads) shape.
    """
    if attention_mask is None:
        # fix: tf.inta is not a TF dtype; the integer mask dtype is tf.int8
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The first decoder position is always attended to.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


# Descriptive alias used by the model tester above.
prepare_mbart_inputs_dict = lowercase
@require_tf
class UpperCAmelCase_(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard TF model-tester suite for MBart.

    NOTE(review): base classes restored from the imported mixins — the original
    listed an undefined name twice.
    """

    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFMBartForConditionalGeneration,
            '''feature-extraction''': TFMBartModel,
            '''summarization''': TFMBartForConditionalGeneration,
            '''text2text-generation''': TFMBartForConditionalGeneration,
            '''translation''': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
    # Slow integration test: English -> Romanian translation with the pinned
    # facebook/mbart-large-en-ro checkpoint.
    # NOTE(review): the three class attributes below are all assigned to the
    # same mangled name `snake_case__` (upstream: src_text, expected_text,
    # model_name), so only the last assignment survives; methods below read
    # self.src_text / self.expected_text / self.model_name.
    snake_case__ = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
    ]
    snake_case__ = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
    ]
    snake_case__ = '''facebook/mbart-large-en-ro'''

    @cached_property
    def _UpperCamelCase ( self : Dict ) -> Any:
        # Tokenizer for the pinned checkpoint (cached after first access).
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def _UpperCamelCase ( self : List[str] ) -> Tuple:
        # Lazily loaded TF seq2seq model.
        # NOTE(review): the local is discarded and the undefined name `model`
        # is returned — upstream assigns `model = ...`; restore the binding.
        _UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def _UpperCamelCase ( self : str , **__UpperCamelCase : str ) -> Any:
        # Translate the source text and compare against the expected reference.
        _UpperCamelCase = self.translate_src_text(**__UpperCamelCase )
        self.assertListEqual(self.expected_text , __UpperCamelCase )

    def _UpperCamelCase ( self : Optional[int] , **__UpperCamelCase : int ) -> str:
        # Tokenize, generate with beam search (num_beams=2), then decode.
        # NOTE(review): intermediate results are assigned to a throwaway local
        # while the following lines read `model_inputs` / `generated_words` —
        # the original variable names need restoring.
        _UpperCamelCase = self.tokenizer(self.src_text , **__UpperCamelCase , return_tensors='''tf''' )
        _UpperCamelCase = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        _UpperCamelCase = self.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
        return generated_words

    @slow
    def _UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
        # Entry point executed by the slow-test suite.
        self._assert_generated_batch_equal_expected()
| 342 | """simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

# NOTE(review): constant names restored — the previous revision assigned every
# constant to the same mangled name `UpperCAmelCase`, while the tokenizer class
# below references VOCAB_FILES_NAMES / PRETRAINED_* (which were then undefined).

# Resolved vocabulary file name inside a checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

# Hub URLs of the vocabulary files for the published ConvBERT checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

# Maximum model input length (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

# Default tokenizer initialisation kwargs per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class UpperCAmelCase_ ( _lowercase):
    """Fast ConvBERT tokenizer (WordPiece), backed by HuggingFace *tokenizers*.

    NOTE(review): attribute, method and parameter names restored from the
    upstream ConvBERT implementation — the previous revision collapsed every
    name to one mangled identifier, so later definitions clobbered earlier
    ones and method bodies referenced undefined names (`token_ids_a`,
    `output`, `normalizer_state`, ...).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-build the backend normalizer if the requested casing/accent/CJK
        # options differ from what the serialized tokenizer was saved with.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] markers: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return segment ids: 0 for the first sequence (with specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the WordPiece vocabulary; returns the tuple of written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 342 | 1 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowerCamelCase_ ( _lowercase ) -> str:
    """Extract the class label from a pet-image filename.

    Filenames look like ``<label>_<index>.jpg`` (possibly behind a directory
    path); the label is everything before the trailing ``_<number>`` suffix.
    """
    # Work on the basename only. The previous revision computed the stem but
    # then ran the anchored regex on the full path, so the greedy `(.*)` group
    # swallowed directory components into the returned label.
    stem = _lowercase.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class _a ( lowerCAmelCase__ ):
    """Map-style dataset over pet image files.

    Each item is a dict with an ``image`` (after ``image_transform``) and a
    ``label`` — the string derived from the filename, or its int id when a
    ``label_to_id`` mapping is provided.
    """

    def __init__( self , file_names , image_transform=None , label_to_id=None ):
        # NOTE(review): parameter names restored — the previous revision reused
        # one mangled name for all three parameters (a SyntaxError) and
        # assigned them to throwaway locals instead of the instance.
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__( self ):
        return len(self.file_names)

    def __getitem__( self , idx ):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        # The target class is encoded in the filename itself.
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def lowerCamelCase_ ( config , args ):
    """Train a frozen-backbone ``resnet50d`` pet classifier with Accelerate.

    Args:
        config: hyper-parameter dict with ``lr``, ``num_epochs``, ``seed``,
            ``batch_size`` and ``image_size`` entries.
        args: parsed CLI namespace (data dir, checkpointing, tracking, ...).

    NOTE(review): this function was non-runnable — the two parameters shared
    one mangled name (a SyntaxError) and every local was assigned to the
    throwaway name ``__A`` while later lines read the real variable names.
    Names restored from those later usages.
    """
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def lowerCamelCase_ ( ) -> None:
    """Parse CLI arguments and launch training with the default hyper-parameters.

    NOTE(review): the previous revision passed the undefined module-level name
    `_lowercase` wherever `True`/`str`/`None` belonged, so every
    `add_argument` call raised NameError; literal values restored.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    # Default hyper-parameters of the example.
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
    # NOTE(review): the entry-point function above is named `lowerCamelCase_`
    # in this revision; calling the undefined name `main` raised NameError
    # when the script was executed directly.
    lowerCamelCase_()
| 520 | # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class _a :
    """Scheduler wrapper that steps only when its optimizer(s) really stepped.

    Accounts for gradient accumulation (no scheduler step while gradients are
    still being accumulated) and for the multi-process batch-size scaling
    Accelerate applies when ``split_batches`` is off.

    NOTE(review): names restored — the previous revision reused one mangled
    name for all ``__init__`` parameters (a SyntaxError), assigned them to
    throwaway locals instead of the instance, and gave every method the same
    name so later definitions clobbered earlier ones.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer=True, split_batches=False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
| 520 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def UpperCAmelCase_ (__a : dict ):
    """Drop fairseq bookkeeping entries from a checkpoint state dict, in place.

    Missing keys are ignored rather than raising ``KeyError``.

    NOTE(review): the previous revision looped over the undefined name
    ``ignore_keys`` and popped the dict itself from the undefined name
    ``state_dict`` — the loop body never touched the argument.
    """
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        # Default of None makes the pop a no-op for keys that are absent.
        __a.pop(k, None)
def UpperCAmelCase_ (__a : dict ):
    """Rename fairseq keys to the transformers scheme, in place.

    ``transformer_layers`` -> ``layers`` and ``subsample`` -> ``conv``.

    NOTE(review): the previous revision popped each matching entry into a
    throwaway local, silently dropping the weights instead of re-inserting
    them under the new key.
    """
    keys = list(__a.keys())
    for key in keys:
        if "transformer_layers" in key:
            __a[key.replace("transformer_layers", "layers")] = __a.pop(key)
        elif "subsample" in key:
            __a[key.replace("subsample", "conv")] = __a.pop(key)
def UpperCAmelCase_ (__a ):
    """Build a bias-free ``nn.Linear`` that shares the embedding's weight tensor.

    Used to tie the LM head to the decoder token embeddings.

    NOTE(review): the previous revision read the undefined name ``emb`` and
    called ``nn.Linear`` with the embedding module as every argument; the
    upstream convert-script behaviour is restored here. Upstream creates
    ``Linear(vocab_size, emb_size)`` and then overwrites ``.weight.data``
    wholesale, so the layer effectively maps emb_size -> vocab_size — kept
    as-is for parity with the source checkpoints.
    """
    vocab_size, emb_size = __a.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = __a.weight.data
    return lin_layer
def UpperCAmelCase_ (checkpoint_path , pytorch_dump_folder_path ):
    """Convert a fairseq Speech2Text checkpoint into a transformers model and save it.

    Args:
        checkpoint_path: path to the fairseq ``.pt`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.

    NOTE(review): names restored — the previous revision reused one mangled
    name for both parameters (a SyntaxError) and assigned every local to a
    throwaway name while later lines read ``args``/``state_dict``/
    ``tie_embeds``/``lm_head_weights``/``missing``. The ``lm_head`` wiring at
    the end follows the upstream convert script — confirm against it.
    """
    mam_aaa = torch.load(checkpoint_path, map_location='cpu')
    args = mam_aaa['args']
    state_dict = mam_aaa['model']
    # Kept before remove_ignore_keys_ drops this entry from the state dict.
    lm_head_weights = state_dict['decoder.output_projection.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(',')]
    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function='relu',
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=2_0_0,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )
    model = SpeechaTextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    # Only the (re-computed) sinusoidal position weights may legitimately be missing.
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    # NOTE(review): the conversion routine above is named `UpperCAmelCase_` in
    # this revision; the previous call target
    # `convert_fairseq_sat_checkpoint_to_tfms` did not exist (NameError), and
    # the parser/args locals were assigned to a throwaway name.
    UpperCAmelCase_(args.fairseq_path, args.pytorch_dump_folder_path)
| 319 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
# Demo data for the Banker's algorithm class below.
# NOTE(review): all three tables are assigned to the same mangled name
# `__lowerCAmelCase`, so only the last assignment survives — the upstream
# module used three distinct names (claim vector, allocation table, maximum
# claim table).
# Total units of each of the 4 resource types available in the system.
__lowerCAmelCase = [8, 5, 9, 7]
# allocation[p][r]: units of resource r currently held by process p.
__lowerCAmelCase = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
# maximum claim[p][r]: most units of resource r process p may ever request.
__lowerCAmelCase = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class UpperCAmelCase__ :
    """Banker's algorithm: simulate process execution while keeping the
    system in a safe state.

    Args:
        claim_vector: total units of each resource type in the system.
        allocated_resources_table: per-process currently allocated units.
        maximum_claim_table: per-process maximum claimable units.

    NOTE(review): names restored — the previous revision reused one mangled
    name for all ``__init__`` parameters (a SyntaxError), never stored the
    tables on the instance, and gave every helper the same method name so
    later definitions clobbered earlier ones.
    """

    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        """Total units of each resource currently allocated across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        """Units of each resource still free: claim vector minus allocations."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self):
        """Per-process outstanding need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        """Map each need row back to the index of the process it belongs to."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs):
        """Run the safety algorithm, printing an execution order or an
        unsafe-state warning. Any truthy keyword flag triggers a pretty dump
        of the input tables first."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break

    def __pretty_data(self):
        """Print the allocation/maximum tables and the current resource usage."""
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f"""P{self.__allocated_resources_table.index(item) + 1}"""
                + ' '.join(f"""{it:>8}""" for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f"""P{self.__maximum_claim_table.index(item) + 1}"""
                + ' '.join(f"""{it:>8}""" for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 319 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class SCREAMING_SNAKE_CASE ( lowercase_ ):
    """Minimal iterable-style dataset that yields the elements of a stored sequence."""

    def __init__( self , snake_case ):
        # NOTE(review): the previous revision assigned the undefined name
        # `data` to a throwaway local instead of storing the argument on the
        # instance, so iteration raised an error.
        self.data = snake_case

    def __iter__( self ):
        for element in self.data:
            yield element
def lowerCamelCase__ ( a__=True ):
    """Create an Accelerator with the given `even_batches` setting.

    These scripts are written for exactly two processes, hence the assertion.

    NOTE(review): the previous revision assigned the Accelerator to a
    throwaway local and then returned the undefined name `accelerator`.
    """
    accelerator = Accelerator(even_batches=a__)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def lowerCamelCase__ ( accelerator , dataset_size , batch_size , iterable=False ):
    """Build a dataloader over ``range(dataset_size)`` and run it through
    ``accelerator.prepare``.

    NOTE(review): parameter names restored — the previous revision reused one
    mangled name for all four parameters, which is a SyntaxError.
    """
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def lowerCamelCase__ (
    accelerator,
    dataset_size,
    batch_size,
    process_0_expected_batch_sizes,
    process_1_expected_batch_sizes,
):
    """Assert that each process sees the expected sequence of per-batch sizes.

    NOTE(review): parameter names restored — the previous revision reused one
    mangled name for all five parameters, which is a SyntaxError.
    """
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def lowerCamelCase__ ( ) -> None:
    """Default ``even_batches=True`` pads so every process sees the same
    number and size of batches.

    NOTE(review): the previous revision discarded the accelerator into a
    throwaway local and then passed the undefined name `a__` onward.
    """
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1],
    )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2],
    )
def lowerCamelCase__ ( ) -> None:
    """With ``even_batches=False`` processes may receive fewer or smaller
    batches, as the uneven expectations below show.

    NOTE(review): the undefined name `a__` stood in for the accelerator and
    for the literal ``False`` passed to ``even_batches``; values restored to
    match the uneven expected batch sizes asserted here.
    """
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1],
    )
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1],
    )
def lowerCamelCase__ ( ) -> None:
    """``join_uneven_inputs`` lets processes with uneven batch counts train
    without deadlocking: process 0 runs two batches, process 1 runs one.

    NOTE(review): locals restored — the previous revision discarded every
    value into throwaway names and then read the original variable names,
    and passed the undefined `a__` where ``even_batches=False`` belonged.
    """
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def lowerCamelCase__ ( a__ ) -> None:
    """``join_uneven_inputs`` should warn (not fail) outside multi-GPU DDP.

    Args:
        a__: the accelerator under test.

    NOTE(review): the undefined name `a__` was also passed where
    ``record=True`` and the expected warning category belonged; restored to
    the standard ``UserWarning`` raised by ``warnings.warn`` — confirm
    against the upstream test.
    """
    with warnings.catch_warnings(record=True) as w:
        with a__.join_uneven_inputs([Mock()]):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def lowerCamelCase__ ( ) -> None:
    """``join_uneven_inputs(even_batches=...)`` temporarily overrides the
    dataloaders' setting inside the context and restores it afterwards.

    NOTE(review): locals restored — the previous revision discarded every
    value into throwaway names and then read the original variable names.
    """
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def lowerCamelCase__ ( ) -> None:
    """Overriding ``even_batches`` must not raise for iterable dataloaders
    mixed in with batch-sampler dataloaders; only the latter are overridden.

    NOTE(review): locals restored — the previous revision discarded every
    value into throwaway names and then read the original variable names.
    """
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    # The iterable dataloader has no batch_sampler to override.
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore')
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def lowerCamelCase__ ( ) -> None:
    """Overriding ``even_batches`` for an iterable dataloader should emit a
    warning explaining the override only applies to map-style datasets.

    NOTE(review): locals restored, and the undefined `a__` replaced by
    ``iterable=True`` / ``record=True`` / ``even_batches=False`` /
    ``UserWarning`` per the upstream test — confirm against it.
    """
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def lowerCamelCase__ ( ) -> None:
    """Run the whole ``even_batches`` behaviour suite on this process group.

    NOTE(review): locals restored — the previous revision discarded the
    accelerator and distributed-type values into throwaway names, never
    actually switched the state to FSDP, and passed the undefined `a__` to
    the non-DDP warning test.
    """
    accelerator = create_accelerator()
    accelerator.print('Test that even_batches variable ensures uniform batches across processes')
    test_default_ensures_even_batch_sizes()
    accelerator.print('Run tests with even_batches disabled')
    test_can_disable_even_batches()
    accelerator.print('Test joining uneven inputs')
    test_can_join_uneven_inputs()
    accelerator.print('Test overriding even_batches when joining uneven inputs')
    test_join_can_override_even_batches()
    accelerator.print('Test overriding even_batches for mixed dataloader types')
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders')
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print('Test join with non DDP distributed raises warning')
    original_state = accelerator.state.distributed_type
    # Temporarily masquerade as FSDP to exercise the non-DDP warning path.
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
    # NOTE(review): the suite entry point above is named `lowerCamelCase__` in
    # this revision; calling the undefined name `main` raised NameError.
    lowerCamelCase__()
| 517 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE :
    """Abstract interface for a hyperparameter-search backend.

    Subclasses set ``name`` (and optionally ``pip_package``) and implement
    ``is_available``, ``run`` and ``default_hp_space``.

    NOTE(review): the original declared every method under the single mangled
    name ``__UpperCAmelCase`` and the attributes under ``SCREAMING_SNAKE_CASE__``
    while the bodies (and the registry/picker below) reference
    ``self.is_available``, ``self.pip_install``, ``cls.name`` and
    ``cls.pip_package`` — the names used by those call sites are restored here.
    """

    # Short backend identifier, e.g. "optuna"; set by concrete subclasses.
    name: str
    # PyPI package name when it differs from ``name``.
    pip_package: str = None

    @staticmethod
    def is_available():
        """Return True when the backend's package can be imported."""
        raise NotImplementedError

    def run(self , trainer , n_trials , direction , **kwargs):
        """Execute the hyperparameter search; implemented by subclasses."""
        raise NotImplementedError

    def default_hp_space(self , trial):
        """Return the default hyperparameter space for a trial."""
        raise NotImplementedError

    def ensure_available(self):
        """Raise RuntimeError if the backend's package is not installed."""
        if not self.is_available():
            raise RuntimeError(
                F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )

    @classmethod
    def pip_install(cls):
        """Return the shell command that installs this backend."""
        return F"""`pip install {cls.pip_package or cls.name}`"""
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
    """Optuna hyperparameter-search backend.

    NOTE(review): the original inherited from the undefined name ``lowercase_``
    and declared methods/attributes under mangled names; the base is restored
    to the backend base class defined just above, and the member names to those
    the registry and picker below actually use.
    """

    name = 'optuna'

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self , trainer , n_trials , direction , **kwargs):
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs )

    def default_hp_space(self , trial):
        return default_hp_space_optuna(trial )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
    """Ray Tune hyperparameter-search backend.

    NOTE(review): base class and member names restored from the call sites in
    this module (see the base-class note above); the pip package differs from
    the backend name because Ray Tune ships as the ``ray[tune]`` extra.
    """

    name = 'ray'
    pip_package = '\'ray[tune]\''

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self , trainer , n_trials , direction , **kwargs):
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs )

    def default_hp_space(self , trial):
        return default_hp_space_ray(trial )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
    """SigOpt hyperparameter-search backend.

    NOTE(review): base class and member names restored from the call sites in
    this module (see the base-class note above).
    """

    name = 'sigopt'

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self , trainer , n_trials , direction , **kwargs):
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs )

    def default_hp_space(self , trial):
        return default_hp_space_sigopt(trial )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
    """Weights & Biases hyperparameter-search backend.

    NOTE(review): base class and member names restored from the call sites in
    this module (see the base-class note above).
    """

    name = 'wandb'

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self , trainer , n_trials , direction , **kwargs):
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs )

    def default_hp_space(self , trial):
        return default_hp_space_wandb(trial )
# Registry mapping each HPSearchBackend enum member to its backend class.
# NOTE(review): OptunaBackend/RayTuneBackend/SigOptBackend/WandbBackend are not
# defined under these names in this file — the backend classes above were all
# renamed to SCREAMING_SNAKE_CASE by an automated pass; verify against the
# original transformers module.
SCREAMING_SNAKE_CASE_ = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCamelCase__ ( ) -> str:
    """Return the name of the first installed hyperparameter-search backend.

    Logs an informational message when several backends are installed, and
    raises RuntimeError (with install hints) when none is available.

    NOTE(review): the original read the undefined names ``a__`` and ``name``;
    the locals are restored here.
    """
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                F"""{len(available_backends)} hyperparameter search backends available. Using {name} as the default.""")
        return name
    raise RuntimeError(
        'No hyperparameter search backend available.\n'
        + '\n'.join(
            F""" - To install {backend.name} run {backend.pip_install()}"""
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
| 517 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
# Test-suite for the BERT slow/fast tokenizers (basic tokenizer, wordpiece,
# offsets, Chinese-character handling, special tokens).
# NOTE(review): locals throughout are all bound to SCREAMING_SNAKE_CASE_ by an
# automated rename while later statements read the original descriptive names
# (e.g. `vocab_tokens`, `tokens`, `do_lower_case`) — verify against the
# upstream transformers test module.
class __snake_case ( lowerCAmelCase__ , unittest.TestCase ):
    # Tokenizer classes under test and mixin configuration flags.
    __lowerCAmelCase : Any = BertTokenizer
    __lowerCAmelCase : List[Any] = BertTokenizerFast
    __lowerCAmelCase : Union[str, Any] = True
    __lowerCAmelCase : int = True
    __lowerCAmelCase : Optional[int] = filter_non_english
    def lowerCAmelCase__ ( self):
        # Write a tiny wordpiece vocabulary to the test tmp dir.
        super().setUp()
        SCREAMING_SNAKE_CASE_ = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def lowerCAmelCase__ ( self , _A):
        # Input/expected-output pair used by the shared tokenizer tests.
        SCREAMING_SNAKE_CASE_ = 'UNwant\u00E9d,running'
        SCREAMING_SNAKE_CASE_ = 'unwanted, running'
        return input_text, output_text
    def lowerCAmelCase__ ( self):
        # Full tokenize + convert_tokens_to_ids round-trip on the toy vocab.
        SCREAMING_SNAKE_CASE_ = self.tokenizer_class(self.vocab_file)
        SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(_A , ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_A) , [9, 6, 7, 12, 10, 11])
    def lowerCAmelCase__ ( self):
        # Slow and fast tokenizers must agree, with and without lower casing.
        if not self.test_rust_tokenizer:
            return
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE_ = 'UNwant\u00E9d,running'
        SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(_A)
        SCREAMING_SNAKE_CASE_ = rust_tokenizer.tokenize(_A)
        self.assertListEqual(_A , _A)
        SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A)
        SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(_A , add_special_tokens=_A)
        self.assertListEqual(_A , _A)
        SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A)
        SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(_A)
        self.assertListEqual(_A , _A)
        # With lower casing
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer(do_lower_case=_A)
        SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer(do_lower_case=_A)
        SCREAMING_SNAKE_CASE_ = 'UNwant\u00E9d,running'
        SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(_A)
        SCREAMING_SNAKE_CASE_ = rust_tokenizer.tokenize(_A)
        self.assertListEqual(_A , _A)
        SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A)
        SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(_A , add_special_tokens=_A)
        self.assertListEqual(_A , _A)
        SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A)
        SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(_A)
        self.assertListEqual(_A , _A)
    def lowerCAmelCase__ ( self):
        # BasicTokenizer splits CJK characters into individual tokens.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz') , ['ah', '\u535A', '\u63A8', 'zz'])
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  ') , ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
    def lowerCAmelCase__ ( self):
        # Lower-case but keep accents.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A , strip_accents=_A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ') , ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['h\u00E9llo'])
    def lowerCAmelCase__ ( self):
        # Lower-case and strip accents.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A , strip_accents=_A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ') , ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ') , ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
    def lowerCAmelCase__ ( self):
        # Case-preserving mode.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  ') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A , strip_accents=_A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ') , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A , strip_accents=_A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ') , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])
    def lowerCAmelCase__ ( self):
        # never_split keeps the listed tokens intact.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=_A , never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU? [UNK]') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])
    def lowerCAmelCase__ ( self):
        # Punctuation splitting around apostrophes and sentence punctuation.
        SCREAMING_SNAKE_CASE_ = BasicTokenizer()
        SCREAMING_SNAKE_CASE_ = 'a\n\'ll !!to?\'d of, can\'t.'
        SCREAMING_SNAKE_CASE_ = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
        self.assertListEqual(tokenizer.tokenize(_A) , _A)
    def lowerCAmelCase__ ( self):
        # WordpieceTokenizer: greedy longest-match with '##' continuations.
        SCREAMING_SNAKE_CASE_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        SCREAMING_SNAKE_CASE_ = {}
        for i, token in enumerate(_A):
            SCREAMING_SNAKE_CASE_ = i
        SCREAMING_SNAKE_CASE_ = WordpieceTokenizer(vocab=_A , unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize('') , [])
        self.assertListEqual(tokenizer.tokenize('unwanted running') , ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running') , ['[UNK]', 'runn', '##ing'])
    def lowerCAmelCase__ ( self):
        # Character-class helpers.
        self.assertTrue(_is_whitespace(' '))
        self.assertTrue(_is_whitespace('\t'))
        self.assertTrue(_is_whitespace('\r'))
        self.assertTrue(_is_whitespace('\n'))
        self.assertTrue(_is_whitespace('\u00A0'))
        self.assertFalse(_is_whitespace('A'))
        self.assertFalse(_is_whitespace('-'))
    def lowerCAmelCase__ ( self):
        self.assertTrue(_is_control('\u0005'))
        self.assertFalse(_is_control('A'))
        self.assertFalse(_is_control(' '))
        self.assertFalse(_is_control('\t'))
        self.assertFalse(_is_control('\r'))
    def lowerCAmelCase__ ( self):
        self.assertTrue(_is_punctuation('-'))
        self.assertTrue(_is_punctuation('$'))
        self.assertTrue(_is_punctuation('`'))
        self.assertTrue(_is_punctuation('.'))
        self.assertFalse(_is_punctuation('A'))
        self.assertFalse(_is_punctuation(' '))
    def lowerCAmelCase__ ( self):
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        # NOTE(review): the comprehensions below tokenize `_A`, not the loop
        # variable `t` — likely a rename artifact; verify upstream.
        self.assertListEqual([tokenizer.tokenize(_A) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(_A) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']])
    @slow
    def lowerCAmelCase__ ( self):
        # Special-token layout: [CLS] seq [SEP] and [CLS] seq [SEP] seq2 [SEP].
        SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('bert-base-uncased')
        SCREAMING_SNAKE_CASE_ = tokenizer.encode('sequence builders' , add_special_tokens=_A)
        SCREAMING_SNAKE_CASE_ = tokenizer.encode('multi-sequence build' , add_special_tokens=_A)
        SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(_A)
        SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(_A , _A)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def lowerCAmelCase__ ( self):
        # Offset mapping must line up with the produced tokens, cased and uncased.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(_A , **_A)
                SCREAMING_SNAKE_CASE_ = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                SCREAMING_SNAKE_CASE_ = tokenizer_r.encode_plus(
                    _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
                SCREAMING_SNAKE_CASE_ = tokenizer_r.do_lower_case if hasattr(_A , 'do_lower_case') else False
                SCREAMING_SNAKE_CASE_ = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'A'),
                        ((1, 2), ','),
                        ((3, 5), 'na'),
                        ((5, 6), '##ï'),
                        ((6, 8), '##ve'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'Allen'),
                        ((21, 23), '##NL'),
                        ((23, 24), '##P'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'a'),
                        ((1, 2), ','),
                        ((3, 8), 'naive'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'allen'),
                        ((21, 23), '##nl'),
                        ((23, 24), '##p'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids']))
                self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'])
    def lowerCAmelCase__ ( self):
        # Chinese characters: with tokenize_chinese_chars each character stands
        # alone (no '##'); without it only the first lacks the '##' prefix.
        SCREAMING_SNAKE_CASE_ = ['的', '人', '有']
        SCREAMING_SNAKE_CASE_ = ''.join(_A)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                SCREAMING_SNAKE_CASE_ = True
                SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(_A , **_A)
                SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(_A , **_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_p.encode(_A , add_special_tokens=_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_r.encode(_A , add_special_tokens=_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_r.convert_ids_to_tokens(_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_p.convert_ids_to_tokens(_A)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(_A , _A)
                self.assertListEqual(_A , _A)
                SCREAMING_SNAKE_CASE_ = False
                SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(_A , **_A)
                SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(_A , **_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_r.encode(_A , add_special_tokens=_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_p.encode(_A , add_special_tokens=_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_r.convert_ids_to_tokens(_A)
                SCREAMING_SNAKE_CASE_ = tokenizer_p.convert_ids_to_tokens(_A)
                # it is expected that only the first Chinese character is not preceded by "##".
                SCREAMING_SNAKE_CASE_ = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(_A)
                ]
                self.assertListEqual(_A , _A)
                self.assertListEqual(_A , _A)
| 620 |
import pytest
import datasets
# Import fixture modules as plugins
UpperCamelCase__ : Union[str, Any] = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def _UpperCAmelCase ( config , items ):
    """Pytest collection hook body: default every collected test that carries
    neither the ``integration`` nor the ``unit`` marker to ``unit``.

    NOTE(review): the original declared both parameters under the same name
    (a SyntaxError) and the body read an undefined ``items``; the conventional
    pytest hook parameter names are restored.
    """
    for item in items:
        # Explicitly marked tests keep their category.
        if any(marker in item.keywords for marker in ['integration', 'unit'] ):
            continue
        item.add_marker(pytest.mark.unit )
def _UpperCAmelCase ( config ):
    """Pytest configure hook body: register the custom ``torchaudio_latest`` marker.

    NOTE(review): the original body read an undefined ``config`` because the
    parameter had been renamed; the hook parameter name is restored.
    """
    config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=True )
def _UpperCAmelCase ( monkeypatch , tmp_path_factory ):
    """Redirect every `datasets` cache directory into a per-session temp dir so
    tests never touch (or pollute) the user's real HF cache.

    NOTE(review): the original declared both parameters under the same name
    (a SyntaxError), read undefined ``monkeypatch``/``tmp_path_factory`` names,
    and passed an undefined name to ``autouse``; restored to ``autouse=True``
    with the conventional fixture parameter names.
    """
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='session' )
def _UpperCAmelCase ( ):
    """Disable tqdm progress bars for the whole test session.

    NOTE(review): the original passed an undefined name to ``autouse``;
    restored to ``autouse=True``.
    """
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def _UpperCAmelCase ( monkeypatch ):
    """Ensure tests never report dataset download counts to the Hub.

    NOTE(review): the original read an undefined ``monkeypatch`` and passed an
    undefined name as the attribute value; ``False`` (do not update download
    counts from tests) is the presumed intended setting — confirm upstream.
    """
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , False )
@pytest.fixture
def _UpperCAmelCase ( monkeypatch ):
    """Silence SQLAlchemy 2.0's 'uber' deprecation warning for the test.

    NOTE(review): the original read an undefined ``monkeypatch`` and passed an
    undefined name as the attribute value; ``True`` (silence the warning) is
    the presumed intended setting — confirm upstream.
    """
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , True )
| 620 | 1 |
"""simple docstring"""
import re
def snake_case ( A__ ):
    """Return True if *A__* is a valid Indian mobile phone number.

    Accepts an optional "+91" country prefix (with an optional dash/space),
    an optional leading "0" and/or "91", followed by 10 digits starting
    with 7, 8 or 9.

    Bug fix: the original compiled this pattern but never used it — it
    searched the phone string for itself (`re.search(phone, phone)`) and
    compared against an undefined name ``phone``.
    """
    pattern = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" )
    if match := pattern.search(A__ ):
        return match.string == A__
    return False
if __name__ == "__main__":
    # Bug fix: the guard called the undefined name `indian_phone_validator`;
    # the validator in this module is `snake_case`.
    print(snake_case('+918827897895'))
| 95 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ (enum.Enum ):
    """Output modes for the text-generation pipeline.

    The pipeline below accesses these members as ``ReturnType.TENSORS``,
    ``ReturnType.NEW_TEXT`` and ``ReturnType.FULL_TEXT``; the original block
    declared three duplicate ``__magic_name__`` attributes instead (a rename
    artifact), leaving the members undefined. Member names are restored from
    those call sites.
    """
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(__A )
# Text-generation pipeline: preprocess (prefix + tokenize), forward
# (model.generate) and postprocess (decode, strip prompt).
# NOTE(review): the decorator argument and base class `__A` are undefined in
# this file, and locals throughout are bound to `UpperCAmelCase_` while later
# statements read the original descriptive names (e.g. `prefix`,
# `preprocess_params`, `inputs`) — rename artifacts; verify against the
# upstream transformers pipeline module.
class UpperCamelCase_ (__A ):
    # Article prepended for XLNet/Transfo-XL to give the model more context.
    __magic_name__ = '''
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    '''
    def __init__( self : List[Any] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : List[Any] ) -> Optional[int]:
        super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            UpperCAmelCase_ : Any = None
            if self.model.config.prefix is not None:
                UpperCAmelCase_ : Any = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                UpperCAmelCase_ : Optional[int] = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params )
                UpperCAmelCase_ : List[Any] = {**self._preprocess_params, **preprocess_params}
                UpperCAmelCase_ : Optional[int] = {**self._forward_params, **forward_params}
    def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : Optional[Any] , ) -> int:
        # Split user kwargs into preprocess / forward / postprocess parameters.
        UpperCAmelCase_ : Union[str, Any] = {}
        if prefix is not None:
            UpperCAmelCase_ : Tuple = prefix
        if prefix:
            UpperCAmelCase_ : Optional[Any] = self.tokenizer(
                lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
            UpperCAmelCase_ : List[str] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
                    " [None, 'hole']" )
            UpperCAmelCase_ : Dict = handle_long_generation
        preprocess_params.update(lowerCAmelCase_ )
        UpperCAmelCase_ : Dict = generate_kwargs
        UpperCAmelCase_ : Dict = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
            UpperCAmelCase_ : Tuple = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
            UpperCAmelCase_ : int = ReturnType.TENSORS
        if return_type is not None:
            UpperCAmelCase_ : int = return_type
        if clean_up_tokenization_spaces is not None:
            UpperCAmelCase_ : Tuple = clean_up_tokenization_spaces
        if stop_sequence is not None:
            UpperCAmelCase_ : Union[str, Any] = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
            if len(lowerCAmelCase_ ) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim." )
            UpperCAmelCase_ : Optional[int] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Dict ) -> Union[str, Any]:
        # Parse arguments
        # Transfo-XL expects a space before punctuation symbols.
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True} )
        return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ )
    def __call__( self : List[Any] , lowerCAmelCase_ : Any , **lowerCAmelCase_ : Union[str, Any] ) -> Dict:
        return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str]="" , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : Optional[Any] ) -> Dict:
        # Preprocess: tokenize prefix + prompt; optionally truncate from the
        # left ("hole") so the requested new tokens fit the model window.
        UpperCAmelCase_ : Tuple = self.tokenizer(
            prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
        UpperCAmelCase_ : Any = prompt_text
        if handle_long_generation == "hole":
            UpperCAmelCase_ : Optional[Any] = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                UpperCAmelCase_ : Dict = generate_kwargs["max_new_tokens"]
            else:
                UpperCAmelCase_ : List[str] = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected" )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                UpperCAmelCase_ : Tuple = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length" )
                UpperCAmelCase_ : Dict = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    UpperCAmelCase_ : Union[str, Any] = inputs["attention_mask"][:, -keep_length:]
        return inputs
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : str ) -> Dict:
        # Forward: run model.generate and reshape to (batch, num_return, seq).
        UpperCAmelCase_ : Optional[Any] = model_inputs["input_ids"]
        UpperCAmelCase_ : str = model_inputs.get("attention_mask" , lowerCAmelCase_ )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            UpperCAmelCase_ : List[str] = None
            UpperCAmelCase_ : Union[str, Any] = None
            UpperCAmelCase_ : str = 1
        else:
            UpperCAmelCase_ : Union[str, Any] = input_ids.shape[0]
        UpperCAmelCase_ : Any = model_inputs.pop("prompt_text" )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        UpperCAmelCase_ : Any = generate_kwargs.pop("prefix_length" , 0 )
        if prefix_length > 0:
            UpperCAmelCase_ : str = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                UpperCAmelCase_ : Tuple = generate_kwargs.get("max_length" ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            UpperCAmelCase_ : Optional[int] = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        UpperCAmelCase_ : int = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
        UpperCAmelCase_ : Dict = generated_sequence.shape[0]
        if self.framework == "pt":
            UpperCAmelCase_ : Optional[int] = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            UpperCAmelCase_ : List[Any] = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str]=ReturnType.FULL_TEXT , lowerCAmelCase_ : Dict=True ) -> List[str]:
        # Postprocess: decode each sequence and strip the prompt per ReturnType.
        UpperCAmelCase_ : List[Any] = model_outputs["generated_sequence"][0]
        UpperCAmelCase_ : int = model_outputs["input_ids"]
        UpperCAmelCase_ : List[str] = model_outputs["prompt_text"]
        UpperCAmelCase_ : Union[str, Any] = generated_sequence.numpy().tolist()
        UpperCAmelCase_ : int = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                UpperCAmelCase_ : Optional[Any] = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                UpperCAmelCase_ : str = self.tokenizer.decode(
                    lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    UpperCAmelCase_ : List[Any] = 0
                else:
                    UpperCAmelCase_ : str = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) )
                if return_type == ReturnType.FULL_TEXT:
                    UpperCAmelCase_ : Union[str, Any] = prompt_text + text[prompt_length:]
                else:
                    UpperCAmelCase_ : Dict = text[prompt_length:]
                UpperCAmelCase_ : int = {"generated_text": all_text}
            records.append(lowerCAmelCase_ )
        return records
| 95 | 1 |
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Import structure consumed by _LazyModule: submodule name -> exported names.
# Bug fix: the original bound this dict — and each later submodule list — to
# the same name `lowerCAmelCase__`, clobbering the structure, and then passed
# the undefined name `_import_structure` to _LazyModule.
_import_structure = {
    'configuration_efficientnet': [
        'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EfficientNetConfig',
        'EfficientNetOnnxConfig',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image processor needs the vision extra (PIL).
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code needs torch.
    _import_structure['modeling_efficientnet'] = [
        'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EfficientNetForImageClassification',
        'EfficientNetModel',
        'EfficientNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module in sys.modules with a lazy proxy so submodules are
    # only imported on first attribute access.
    # NOTE(review): the original assigned the proxy to a plain name while
    # importing `sys` and never using it — the `sys.modules` assignment is the
    # presumed intent; confirm against the upstream transformers __init__.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 711 |
'''simple docstring'''
def lowerCAmelCase__ ( UpperCAmelCase ):
    """Return the number of set bits (population count) of a non-negative int.

    Raises:
        TypeError: if the input is not an ``int``.
        ValueError: if the input is negative.

    Bug fix: the original compared the undefined name ``a`` with 0 and called
    ``isinstance`` with the value as its own second argument.
    """
    # Type check first: comparing a non-int with 0 could itself misbehave.
    if not isinstance(UpperCAmelCase , int ):
        raise TypeError("""Input value must be a 'int' type""" )
    if UpperCAmelCase < 0:
        raise ValueError("""Input value must be a positive integer""" )
    return bin(UpperCAmelCase ).count("""1""" )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 172 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger.
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)

# Name of the vocabulary file bundled with CPM-Ant checkpoints.
UpperCamelCase : Tuple = {"""vocab_file""": """vocab.txt"""}

# Download locations of the pretrained vocabularies.
UpperCamelCase : Union[str, Any] = {
    """vocab_file""": {
        """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
    },
}

# Maximum input sizes (in tokens) for the pretrained positional embeddings.
UpperCamelCase : int = {
    """openbmb/cpm-ant-10b""": 1024,
}
def UpperCamelCase_ ( __a ) -> Tuple:
a__ : List[str] = collections.OrderedDict()
with open(__a , "r" , encoding="utf-8" ) as reader:
a__ : Optional[int] = reader.readlines()
for index, token in enumerate(__a ):
a__ : List[str] = token.rstrip("\n" )
a__ : Tuple = index
return vocab
class A__ ( object ):
    """Greedy longest-match-first tokenizer over a fixed vocabulary.

    Unlike BERT's WordPiece, matched sub-tokens are plain substrings of the
    input (no ``##`` continuation prefix is added).

    NOTE(review): the original declared ``class A__ ( A__ )`` (self-inheritance,
    a NameError at class creation), declared all ``__init__`` parameters under
    one duplicated name (a SyntaxError), and bound every local to ``a__`` while
    reading the descriptive names; the conventional names are restored.
    """

    def __init__( self : int , vocab , unk_token="<unk>" , max_input_chars_per_word=200 ):
        # Token -> id mapping; only membership is used during tokenization.
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def _UpperCamelCase( self : int , token ):
        """Tokenize *token* greedily; characters with no match map to ``unk_token``."""
        chars = list(token )
        # Overlong inputs collapse to a single UNK token.
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            # Find the longest vocabulary entry starting at `start`.
            while start < end:
                substr = "".join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # No match at all: emit UNK and advance one character.
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class A__ ( A__ ):
    """CPM-Ant style tokenizer: jieba word segmentation followed by greedy
    word-piece matching against a plain-text vocabulary.

    NOTE(review): this block appears to be machine-renamed and is broken as
    written -- ``__init__`` repeats the parameter name ``lowerCamelCase__``
    (a SyntaxError); ``load_vocab`` / ``WordpieceTokenizer`` /
    ``VOCAB_FILES_NAMES`` / ``logger`` are not defined under those names in
    this module; locals are bound to ``a__`` but read via their original
    names; and the ``key=lambda`` below takes one parameter yet reads ``x``.
    Confirm the intended names before relying on this class.
    """
    # Each assignment rebinds the same name `_lowercase`; only the last
    # (`False`, presumably a slow/fast-tokenizer flag) survives.
    _lowercase = VOCAB_FILES_NAMES
    _lowercase = PRETRAINED_VOCAB_FILES_MAP
    _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowercase = ['input_ids', 'attention_mask']
    _lowercase = False

    def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int]="<d>" , lowerCamelCase__ : List[Any]="</d>" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : List[Any]="<pad>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : int="</n>" , lowerCamelCase__ : Optional[Any]="</_>" , lowerCamelCase__ : Any="left" , **lowerCamelCase__ : Optional[Any] , ):
        # jieba is an optional dependency used only by this tokenizer.
        requires_backends(self , ["jieba"] )
        super().__init__(
            bod_token=lowerCamelCase__ , eod_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , line_token=lowerCamelCase__ , space_token=lowerCamelCase__ , padding_side=lowerCamelCase__ , **lowerCamelCase__ , )
        a__ : Union[str, Any] = bod_token
        a__ : Optional[Any] = eod_token
        a__ : Any = load_vocab(lowerCamelCase__ )
        # Space/line tokens are removed from the encoder and remembered
        # separately (presumably as space_id / line_id) -- confirm.
        a__ : Dict = self.encoder[space_token]
        a__ : List[str] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        a__ : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase__ : x[1] ) )
        a__ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
        a__ : Optional[int] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )

    @property
    def _UpperCamelCase( self : List[str] ):
        # id of the begin-of-document token
        return self.encoder[self.bod_token]

    @property
    def _UpperCamelCase( self : Union[str, Any] ):
        # id of the end-of-document token
        return self.encoder[self.eod_token]

    @property
    def _UpperCamelCase( self : Tuple ):
        # id of the newline token
        return self.encoder["\n"]

    @property
    def _UpperCamelCase( self : Optional[int] ):
        # vocabulary size (added tokens excluded)
        return len(self.encoder )

    def _UpperCamelCase( self : Dict ):
        # full vocab including user-added tokens
        return dict(self.encoder , **self.added_tokens_encoder )

    def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[Any] ):
        # Segment with jieba, then word-piece each segment.
        a__ : Optional[Any] = []
        for x in jieba.cut(lowerCamelCase__ , cut_all=lowerCamelCase__ ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase__ ) )
        return output_tokens

    def _UpperCamelCase( self : str , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Tuple ):
        # Drop negative ids and special tokens before delegating decoding.
        a__ : Tuple = [i for i in token_ids if i >= 0]
        a__ : List[str] = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(lowerCamelCase__ , **lowerCamelCase__ )

    def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Optional[int] ):
        # membership test against the base vocabulary
        return token in self.encoder

    def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : List[str] ):
        # tokens -> string (no separator: pieces are plain substrings)
        return "".join(lowerCamelCase__ )

    def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : str ):
        # token -> id, falling back to the unk token's id
        return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )

    def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple ):
        # id -> token, falling back to the unk token
        return self.decoder.get(lowerCamelCase__ , self.unk_token )

    def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ):
        # Persist the (sorted) vocabulary to disk; warns when indices are
        # not consecutive. NOTE(review): `vocab_file`, `index`,
        # `filename_prefix` and `save_directory` are read but never bound
        # under those names here.
        if os.path.isdir(lowerCamelCase__ ):
            a__ : Optional[int] = os.path.join(
                lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            a__ : Optional[int] = (filename_prefix + "-" if filename_prefix else "") + save_directory
        a__ : Dict = 0
        if " " in self.encoder:
            a__ : int = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            a__ : int = self.encoder["\n"]
            del self.encoder["\n"]
        a__ : Optional[int] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase__ : x[1] ) )
        with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        " Please check that the vocabulary is not corrupted!" )
                    a__ : List[Any] = token_index
                writer.write(token + "\n" )
                index += 1
        return (vocab_file,)

    def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : List[int] = None ):
        # Prepend BOS to each sequence (pair sequences get a BOS each).
        if token_ids_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a

    def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ):
        # 1 marks special-token positions, 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
        if token_ids_a is not None:
            return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ ))
        return [1] + ([0] * len(lowerCamelCase__ ))
| 37 |
def UpperCamelCase_ ( __a = 50 ) -> int:
    """Project Euler 116: count the ways to replace tiles in a row of length
    *__a* with coloured tiles of length 2 (red), 3 (green) or 4 (blue),
    using at least one coloured tile and a single colour per row.

    Fixes the original body, which read the undefined names
    ``different_colour_ways_number`` and ``length`` (NameError).

    >>> UpperCamelCase_(5)
    12
    """
    length = __a
    # ways[n][t] = number of arrangements of a row of length n that use at
    # least one tile of length t + 2 (t = 0, 1, 2 for red/green/blue).
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                # Place the first tile at tile_start; the remainder of the row
                # may hold more tiles (recursive count) or none (+1).
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
    # Fix: the counting function above is bound to `UpperCamelCase_`;
    # calling the undefined name `solution` raised NameError.
    print(f"""{UpperCamelCase_() = }""")
| 37 | 1 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __UpperCAmelCase ( UpperCAmelCase_ : Tuple ) -> None:
    '''Drop fairseq bookkeeping keys from *UpperCAmelCase_* (a state dict),
    in place.

    Fixes the original body, which bound the key list to a throwaway name and
    then read the undefined names ``ignore_keys`` and ``state_dict``.
    ``dict.pop(key, None)`` is used so absent keys are ignored.
    '''
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        UpperCAmelCase_.pop(k, None)
def __UpperCAmelCase ( UpperCAmelCase_ ) -> "nn.Linear":
    '''Build a bias-free ``nn.Linear`` whose weight is tied to the embedding
    *UpperCAmelCase_* (the usual LM-head weight-tying helper).

    Fixes the original body, which never unpacked ``emb.weight.shape`` and
    passed the embedding module itself as the Linear's size arguments.
    '''
    vocab_size, emb_size = UpperCAmelCase_.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share the embedding's parameter tensor with the linear layer.
    lin_layer.weight.data = UpperCAmelCase_.weight.data
    return lin_layer
def __UpperCAmelCase ( UpperCAmelCase_ : Dict ) -> Dict:
    '''Load a fairseq M2M-100 checkpoint from *UpperCAmelCase_* (a path) and
    build a ``MaMaaaForConditionalGeneration`` model from it.

    NOTE(review): broken as written -- every intermediate is bound to
    ``__snake_case`` but later read via the original names (``mam_aaa``,
    ``args``, ``state_dict``), and the helpers are all defined under the same
    name ``__UpperCAmelCase`` so they shadow one another. Confirm intended
    bindings before use.
    '''
    __snake_case : str = torch.load(UpperCAmelCase_ , map_location='cpu' )  # raw fairseq checkpoint dict
    __snake_case : int = mam_aaa["args"] or mam_aaa["cfg"]["model"]  # training hyper-parameters
    __snake_case : Dict = mam_aaa["model"]  # parameter tensors
    remove_ignore_keys_(UpperCAmelCase_ )
    __snake_case : Union[str, Any] = state_dict["encoder.embed_tokens.weight"].shape[0]  # vocab size
    __snake_case : str = MaMaaaConfig(
        vocab_size=UpperCAmelCase_ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
    __snake_case : str = state_dict["decoder.embed_tokens.weight"]  # presumably the shared embedding -- confirm
    __snake_case : List[Any] = MaMaaaForConditionalGeneration(UpperCAmelCase_ )
    model.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
    __snake_case : Dict = make_linear_from_emb(model.model.shared )  # tied LM head
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # Fixes: the parser/args were bound to throwaway names but read as
    # `parser`/`args`; `args.fairseq_pathß` carried a stray 'ß'; and the
    # converter is bound to `__UpperCAmelCase` (its most recent definition
    # above), not `convert_fairseq_mamaaa_checkpoint_from_disk`.
    model = __UpperCAmelCase(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 719 | """simple docstring"""
import string
import numpy
def __UpperCAmelCase ( UpperCAmelCase_ : int , UpperCAmelCase__ : int ) -> int:
    '''Return the greatest common divisor of the two arguments (Euclid).

    Fixes the original definition, which repeated one parameter name (a
    SyntaxError) and recursed through names that were never defined. The
    iterative form also avoids self-recursion through a module-global name
    that later definitions in this file rebind.
    '''
    a, b = UpperCAmelCase_, UpperCAmelCase__
    while a:
        a, b = b % a, a
    return b
class UpperCamelCase :
    """Hill cipher over the 36-character alphabet A-Z0-9.

    NOTE(review): machine-renamed and broken as written -- every method is
    defined under the same name ``_lowercase`` (only the last survives), the
    three ``UpperCAmelCase`` class attributes overwrite one another, the
    vectorized lambdas read undefined names (``x``, ``lowercase``), and
    locals are bound to ``__snake_case`` but read via their original names.
    Confirm intended names (key_string, modulus, to_int, encrypt, ...).
    """
    UpperCAmelCase : Any = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    UpperCAmelCase : List[Any] = numpy.vectorize(lambda lowercase : x % 36 )
    UpperCAmelCase : Dict = numpy.vectorize(lowercase )

    def __init__(self : str , _A : numpy.ndarray) -> None:
        __snake_case : str = self.modulus(_A)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        __snake_case : Optional[Any] = encrypt_key.shape[0]  # block size ("break key")

    def _lowercase (self : Any , _A : str) -> int:
        # character -> its index in the cipher alphabet
        return self.key_string.index(_A)

    def _lowercase (self : Union[str, Any] , _A : int) -> str:
        # numeric value -> cipher-alphabet character
        return self.key_string[round(_A)]

    def _lowercase (self : Optional[int]) -> None:
        # Reject keys whose determinant is not coprime with 36.
        __snake_case : Any = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            __snake_case : Any = det % len(self.key_string)
        __snake_case : Tuple = len(self.key_string)
        if greatest_common_divisor(_A , len(self.key_string)) != 1:
            __snake_case : List[str] = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(_A)

    def _lowercase (self : Dict , _A : str) -> str:
        # Keep only alphabet characters and pad to a block-size multiple
        # by repeating the last character.
        __snake_case : str = [char for char in text.upper() if char in self.key_string]
        __snake_case : int = chars[-1]
        while len(_A) % self.break_key != 0:
            chars.append(_A)
        return "".join(_A)

    def _lowercase (self : Union[str, Any] , _A : str) -> str:
        # Encrypt block by block: vector -> key @ vector (mod 36) -> chars.
        __snake_case : Any = self.process_text(text.upper())
        __snake_case : Dict = ''
        for i in range(0 , len(_A) - self.break_key + 1 , self.break_key):
            __snake_case : Dict = text[i : i + self.break_key]
            __snake_case : List[str] = [self.replace_letters(_A) for char in batch]
            __snake_case : str = numpy.array([vec]).T
            __snake_case : List[Any] = self.modulus(self.encrypt_key.dot(_A)).T.tolist()[
                0
            ]
            __snake_case : str = ''.join(
                self.replace_digits(_A) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def _lowercase (self : Optional[int]) -> numpy.ndarray:
        # Modular inverse of the key matrix: det_inv * det * inv(key) (mod 36).
        __snake_case : List[Any] = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            __snake_case : int = det % len(self.key_string)
        __snake_case : Optional[Any] = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                __snake_case : Dict = i
                break
        __snake_case : List[str] = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(_A))

    def _lowercase (self : int , _A : str) -> str:
        # Decrypt block by block with the modular inverse key.
        __snake_case : int = self.make_decrypt_key()
        __snake_case : List[str] = self.process_text(text.upper())
        __snake_case : str = ''
        for i in range(0 , len(_A) - self.break_key + 1 , self.break_key):
            __snake_case : Optional[Any] = text[i : i + self.break_key]
            __snake_case : Union[str, Any] = [self.replace_letters(_A) for char in batch]
            __snake_case : Tuple = numpy.array([vec]).T
            __snake_case : List[str] = self.modulus(decrypt_key.dot(_A)).T.tolist()[0]
            __snake_case : str = ''.join(
                self.replace_digits(_A) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def __UpperCAmelCase ( ) -> None:
    '''Interactive driver: read an encryption key matrix from stdin, then
    encrypt or decrypt user-supplied text with the Hill cipher above.

    Fixes NameErrors in the original: locals were bound to a throwaway name
    but read via their original names, and the cipher class is bound to
    ``UpperCamelCase`` in this module, not ``HillCipher``.
    '''
    key_order = int(input('Enter the order of the encryption key: ' ) )
    hill_matrix = []
    print('Enter each row of the encryption key with space separated integers' )
    for _ in range(key_order ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )
    hc = UpperCamelCase(numpy.array(hill_matrix ) )
    print('Would you like to encrypt or decrypt some text? (1 or 2)' )
    option = input('\n1. Encrypt\n2. Decrypt\n' )
    if option == "1":
        text_to_encrypt = input('What text would you like to encrypt?: ' )
        print('Your encrypted text is:' )
        # NOTE(review): the cipher class's methods are all currently named
        # `_lowercase`; `encrypt`/`decrypt` match the intended API and need
        # the class block fixed as well -- confirm.
        print(hc.encrypt(text_to_encrypt ) )
    elif option == "2":
        text_to_decrypt = input('What text would you like to decrypt?: ' )
        print('Your decrypted text is:' )
        print(hc.decrypt(text_to_decrypt ) )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the interactive driver above is bound to `__UpperCAmelCase`;
    # `main` was never defined in this module.
    __UpperCAmelCase()
| 192 | 0 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : int =logging.get_logger()  # module logger; NOTE(review): `_lowerCAmelCase` is rebound repeatedly below
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE = True ):
    '''Convert one timm LeViT checkpoint into a HF model and optionally save
    it (presumably: hidden_sizes, name, config, save_directory, push_to_hub).

    NOTE(review): broken as written -- all five parameters share one name
    (a SyntaxError), and locals are bound to ``UpperCAmelCase__`` but read
    via the original names (``name``, ``from_model``, ``our_model``,
    ``weights``, ``og_keys``, ``new_keys``, ``checkpoint_name``, ...).
    '''
    print(f"Converting {name}..." )
    with torch.no_grad():
        # Pick the timm reference model that matches the requested width.
        if hidden_sizes == 1_2_8:
            if name[-1] == "S":
                UpperCAmelCase__: Tuple = timm.create_model("levit_128s" ,pretrained=SCREAMING_SNAKE_CASE )
            else:
                UpperCAmelCase__: int = timm.create_model("levit_128" ,pretrained=SCREAMING_SNAKE_CASE )
        if hidden_sizes == 1_9_2:
            UpperCAmelCase__: List[str] = timm.create_model("levit_192" ,pretrained=SCREAMING_SNAKE_CASE )
        if hidden_sizes == 2_5_6:
            UpperCAmelCase__: Optional[int] = timm.create_model("levit_256" ,pretrained=SCREAMING_SNAKE_CASE )
        if hidden_sizes == 3_8_4:
            UpperCAmelCase__: Any = timm.create_model("levit_384" ,pretrained=SCREAMING_SNAKE_CASE )
    from_model.eval()
    UpperCAmelCase__: str = LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE ).eval()
    # Copy weights by position: i-th source key -> i-th destination key.
    UpperCAmelCase__: int = OrderedDict()
    UpperCAmelCase__: Dict = from_model.state_dict()
    UpperCAmelCase__: List[str] = list(from_model.state_dict().keys() )
    UpperCAmelCase__: Dict = list(our_model.state_dict().keys() )
    print(len(SCREAMING_SNAKE_CASE ) ,len(SCREAMING_SNAKE_CASE ) )
    for i in range(len(SCREAMING_SNAKE_CASE ) ):
        UpperCAmelCase__: List[str] = weights[og_keys[i]]
    our_model.load_state_dict(SCREAMING_SNAKE_CASE )
    # Sanity check: both models must agree on a random input.
    UpperCAmelCase__: Optional[Any] = torch.randn((2, 3, 2_2_4, 2_2_4) )
    UpperCAmelCase__: List[str] = from_model(SCREAMING_SNAKE_CASE )
    UpperCAmelCase__: Union[str, Any] = our_model(SCREAMING_SNAKE_CASE ).logits
    assert torch.allclose(SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ), "The model logits don't match the original one."
    UpperCAmelCase__: Optional[int] = name
    print(SCREAMING_SNAKE_CASE )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        UpperCAmelCase__: List[str] = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(f"Pushed {checkpoint_name}" )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE = None ,SCREAMING_SNAKE_CASE = True ):
    '''Convert one named LeViT variant, or all of them, using the per-model
    converter above (presumably: save_directory, model_name, push_to_hub).

    NOTE(review): broken as written -- parameters share one name
    (a SyntaxError), locals are bound to ``UpperCAmelCase__`` but read via
    the original names (``idalabel``, ``names_to_hidden_sizes``, ...), and
    ``ImageNetPreTrainedConfig`` (presumably the ``partial`` built below) is
    never bound under that name.
    '''
    UpperCAmelCase__: Optional[Any] = "imagenet-1k-id2label.json"
    UpperCAmelCase__: List[str] = 1_0_0_0
    UpperCAmelCase__: List[str] = (1, num_labels)
    UpperCAmelCase__: Optional[Any] = "huggingface/label-files"
    UpperCAmelCase__: List[Any] = num_labels
    # Fetch the ImageNet label map from the HF Hub (network access).
    UpperCAmelCase__: Optional[int] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
    UpperCAmelCase__: Tuple = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
    UpperCAmelCase__: int = idalabel
    UpperCAmelCase__: Optional[int] = {v: k for k, v in idalabel.items()}
    UpperCAmelCase__: List[str] = partial(SCREAMING_SNAKE_CASE ,num_labels=SCREAMING_SNAKE_CASE ,idalabel=SCREAMING_SNAKE_CASE ,labelaid=SCREAMING_SNAKE_CASE )
    # Hidden-size lookup per published LeViT variant.
    UpperCAmelCase__: Any = {
        "levit-128S": 1_2_8,
        "levit-128": 1_2_8,
        "levit-192": 1_9_2,
        "levit-256": 2_5_6,
        "levit-384": 3_8_4,
    }
    # Architecture hyper-parameters per variant.
    UpperCAmelCase__: List[str] = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[1_2_8, 2_5_6, 3_8_4] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[1_6, 1_6, 1_6] ,drop_path_rate=0 ,),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[1_2_8, 2_5_6, 3_8_4] ,num_attention_heads=[4, 8, 1_2] ,depths=[4, 4, 4] ,key_dim=[1_6, 1_6, 1_6] ,drop_path_rate=0 ,),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[1_9_2, 2_8_8, 3_8_4] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[3_2, 3_2, 3_2] ,drop_path_rate=0 ,),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[2_5_6, 3_8_4, 5_1_2] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[3_2, 3_2, 3_2] ,drop_path_rate=0 ,),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[3_8_4, 5_1_2, 7_6_8] ,num_attention_heads=[6, 9, 1_2] ,depths=[4, 4, 4] ,key_dim=[3_2, 3_2, 3_2] ,drop_path_rate=0.1 ,),
    }
    if model_name:
        # Convert just the requested variant.
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] ,SCREAMING_SNAKE_CASE ,names_to_config[model_name] ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE )
    else:
        # Convert every known variant.
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""levit-dump-folder/""",
        type=Path,
        required=False,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    # Fixes: the parser/args were bound to throwaway names but read as
    # `parser`/`args`, and `convert_weights_and_push` is undefined -- the
    # weights converter above is the most recent binding of `_A`.
    _A(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 113 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: maps each submodule of this package to the public
# names it defines. Fixes the original, which rebound one throwaway name for
# every piece of the structure (losing all but the last), never defined
# `_import_structure`, and never installed the `_LazyModule` proxy -- the
# final call raised NameError at import time.
_import_structure = {
    "configuration_mobilenet_va": [
        """MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """MobileNetV2Config""",
        """MobileNetV2OnnxConfig""",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_va"] = ["""MobileNetV2FeatureExtractor"""]
    _import_structure["image_processing_mobilenet_va"] = ["""MobileNetV2ImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_va"] = [
        """MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MobileNetV2ForImageClassification""",
        """MobileNetV2ForSemanticSegmentation""",
        """MobileNetV2Model""",
        """MobileNetV2PreTrainedModel""",
        """load_tf_weights_in_mobilenet_v2""",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_va import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetVaConfig,
        MobileNetVaOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
        from .image_processing_mobilenet_va import MobileNetVaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_va import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetVaForImageClassification,
            MobileNetVaForSemanticSegmentation,
            MobileNetVaModel,
            MobileNetVaPreTrainedModel,
            load_tf_weights_in_mobilenet_va,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Union[str, Any]= logging.get_logger(__name__)  # module logger
# Pretrained I-BERT checkpoints -> hosted config URLs.
# NOTE(review): the name `A__` is rebound immediately below, so the logger
# assigned above becomes unreachable -- looks like a rename artifact.
A__ : Union[str, Any]= {
    '''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
    '''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
    '''kssteven/ibert-roberta-large-mnli''': (
        '''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
    ),
}
class __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
    """Configuration for an I-BERT model (integer-quantized RoBERTa).

    Fixes the original ``__init__``, which repeated the parameter name
    ``snake_case_`` for every argument (a SyntaxError) and passed the
    undefined name ``_lowercase`` to ``super().__init__``. Parameter names
    and defaults follow the assignment order of the original body.
    """

    # model-type string consumed by the auto classes; kept under the
    # original attribute name to avoid changing the class interface.
    a : Dict ="""ibert"""

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # quant_mode: whether integer-only quantization is active;
        # force_dequant: which ops to force back to FP ("none" = keep all quantized).
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
    """ONNX export configuration for I-BERT: declares the dynamic axes of
    the model inputs."""

    @property
    def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
        # Fix: the axis map was bound to a throwaway name but read back as
        # `dynamic_axis`, raising NameError. Multiple-choice inputs carry an
        # extra `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 720 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
    """Build a ``SwinConfig`` from a timm checkpoint name such as
    ``swin_tiny_patch4_window7_224``.

    NOTE(review): broken as written -- every value is bound to
    ``UpperCamelCase__`` but read via the original names (``name_split``,
    ``model_size``, ``img_size``, ``idalabel``, ...), and the parameter is
    never used under the name ``swin_name``. Confirm intended bindings.
    """
    UpperCamelCase__ = SwinConfig()
    UpperCamelCase__ = swin_name.split('_' )  # e.g. ["swin","tiny","patch4","window7","224"]
    UpperCamelCase__ = name_split[1]  # model size: tiny/small/base/large
    UpperCamelCase__ = int(name_split[4] )  # image resolution
    UpperCamelCase__ = int(name_split[3][-1] )  # window size (last digit of "window7")
    if model_size == "tiny":
        UpperCamelCase__ = 96
        UpperCamelCase__ = (2, 2, 6, 2)
        UpperCamelCase__ = (3, 6, 12, 24)
    elif model_size == "small":
        UpperCamelCase__ = 96
        UpperCamelCase__ = (2, 2, 18, 2)
        UpperCamelCase__ = (3, 6, 12, 24)
    elif model_size == "base":
        UpperCamelCase__ = 1_28
        UpperCamelCase__ = (2, 2, 18, 2)
        UpperCamelCase__ = (4, 8, 16, 32)
    else:
        UpperCamelCase__ = 1_92
        UpperCamelCase__ = (2, 2, 18, 2)
        UpperCamelCase__ = (6, 12, 24, 48)
    # in22k checkpoints are classified over the ImageNet-22k label set.
    if "in22k" in swin_name:
        UpperCamelCase__ = 2_18_41
    else:
        UpperCamelCase__ = 10_00
        UpperCamelCase__ = 'huggingface/label-files'
        UpperCamelCase__ = 'imagenet-1k-id2label.json'
        # Fetch the label map from the HF Hub (network access).
        UpperCamelCase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
        UpperCamelCase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
        UpperCamelCase__ = idalabel
        UpperCamelCase__ = {v: k for k, v in idalabel.items()}
    UpperCamelCase__ = img_size
    UpperCamelCase__ = num_classes
    UpperCamelCase__ = embed_dim
    UpperCamelCase__ = depths
    UpperCamelCase__ = num_heads
    UpperCamelCase__ = window_size
    return config
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> str:
    """Map a timm Swin parameter name to its HF Transformers equivalent.

    Fixes the original body, which read the undefined name ``name``
    (the parameter was never used) and bound every replacement to a
    throwaway name. The replacement chain is order-sensitive: ``attn.proj``
    must be handled before the bare ``attn`` substring.
    """
    name = SCREAMING_SNAKE_CASE
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    # Classifier head keeps no "swin." prefix; everything else is nested
    # under the base model.
    if "head" in name:
        name = name.replace('head' , 'classifier' )
    else:
        name = 'swin.' + name
    return name
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
    """Rewrite a timm Swin state dict into HF layout, splitting fused ``qkv``
    tensors into separate query/key/value slices.

    NOTE(review): broken as written -- the two parameters share one name
    (a SyntaxError), locals are bound to ``UpperCamelCase__`` but read via
    the original names (``orig_state_dict``, ``key_split``, ``dim``,
    ``val``), and the renamed key is never written back into the dict.
    """
    for key in orig_state_dict.copy().keys():
        UpperCamelCase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
        if "mask" in key:
            # relative-position masks are not carried over
            continue
        elif "qkv" in key:
            UpperCamelCase__ = key.split('.' )
            UpperCamelCase__ = int(key_split[1] )  # layer index
            UpperCamelCase__ = int(key_split[3] )  # block index
            UpperCamelCase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                # fused qkv weight -> q / k / v row slices
                UpperCamelCase__ = val[:dim, :]
                UpperCamelCase__ = val[
                    dim : dim * 2, :
                ]
                UpperCamelCase__ = val[-dim:, :]
            else:
                # fused qkv bias -> q / k / v slices
                UpperCamelCase__ = val[
                    :dim
                ]
                UpperCamelCase__ = val[
                    dim : dim * 2
                ]
                UpperCamelCase__ = val[
                    -dim:
                ]
        else:
            UpperCamelCase__ = val
    return orig_state_dict
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
    """Convert one timm Swin checkpoint to a HF ``SwinForImageClassification``
    model, sanity-check logits against timm on a COCO image, and save both
    the model and image processor (presumably: swin_name,
    pytorch_dump_folder_path). Requires network access and the ``timm``
    package.

    NOTE(review): the two parameters share one name (a SyntaxError) and
    locals are bound to ``UpperCamelCase__`` but read via original names.
    """
    UpperCamelCase__ = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE )
    timm_model.eval()
    UpperCamelCase__ = get_swin_config(SCREAMING_SNAKE_CASE )
    UpperCamelCase__ = SwinForImageClassification(SCREAMING_SNAKE_CASE )
    model.eval()
    UpperCamelCase__ = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE )
    model.load_state_dict(SCREAMING_SNAKE_CASE )
    UpperCamelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    UpperCamelCase__ = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
    UpperCamelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
    UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
    UpperCamelCase__ = timm_model(inputs['pixel_values'] )
    UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE ).logits
    # Both implementations must agree to within 1e-3.
    assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
    print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(SCREAMING_SNAKE_CASE )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--swin_name""",
        default="""swin_tiny_patch4_window7_224""",
        type=str,
        help="""Name of the Swin timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    args = parser.parse_args()
    # Fixes: the parser/args were bound to throwaway names but read as
    # `parser`/`args`, and `convert_swin_checkpoint` is undefined -- the
    # checkpoint converter above is the most recent binding of
    # `lowerCAmelCase_`.
    lowerCAmelCase_(args.swin_name, args.pytorch_dump_folder_path)
| 20 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# NOTE(review): the same name `__SCREAMING_SNAKE_CASE` is rebound four times,
# so only the last value (the TPosition alias) survives; the heuristic flag,
# grid, and delta list become unreachable -- looks like a rename artifact.
# 1 for manhattan, 0 for euclidean
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
__SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
__SCREAMING_SNAKE_CASE = tuple[int, int]  # (y, x) grid coordinate alias
class lowerCAmelCase__ :
    """A*-search tree node: grid position, goal, accumulated path cost and
    heuristic estimate.

    Fixes the original, whose ``__init__`` repeated the parameter name
    ``A__`` six times (a SyntaxError) and bound the arguments to locals
    instead of instance attributes, and whose heuristic method did not match
    the ``self.calculate_heuristic()`` call site.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        # positions are handled as (y, x) tuples throughout this module
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan distance when the module flag HEURISTIC == 1,
        otherwise Euclidean distance."""
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dy) + abs(dx)
        return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        # Ordering by total cost lets node lists be sorted directly.
        return self.f_cost < other.f_cost
class lowerCAmelCase__ :
    """Forward A* search over the module-level grid.

    NOTE(review): broken as written -- ``__init__`` repeats the parameter
    name ``A__`` (a SyntaxError), binds its nodes/lists to ``a__`` locals
    instead of ``self`` attributes, and the node class is referenced as
    ``Node`` although that name is never defined (the node class above is
    also named ``lowerCAmelCase__`` and is shadowed by this definition).
    """
    def __init__( self : Optional[int] , A__ : TPosition , A__ : TPosition ) -> Optional[Any]:
        # start / target nodes and the open/closed lists
        a__ : int = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , A__ )
        a__ : Dict = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , A__ )
        a__ : Dict = [self.start]
        a__ : list[Node] = []
        a__ : str = False

    def __lowerCAmelCase ( self : List[str] ) -> list[TPosition]:
        """Pop the cheapest open node until the target is reached; returns the
        reconstructed path, or just the start position on failure."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            a__ : Dict = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(A__ )
            self.closed_nodes.append(A__ )
            a__ : List[Any] = self.get_successors(A__ )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(A__ )
                else:
                    # retrieve the best current path
                    a__ : Optional[int] = self.open_nodes.pop(self.open_nodes.index(A__ ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(A__ )
                    else:
                        self.open_nodes.append(A__ )
        return [self.start.pos]

    def __lowerCAmelCase ( self : Optional[Any] , A__ : Node ) -> list[Node]:
        """Expand a node into its in-bounds, non-obstacle neighbours."""
        a__ : Optional[int] = []
        for action in delta:
            a__ : List[Any] = parent.pos_x + action[1]
            a__ : Tuple = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A__ ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    A__ , A__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , A__ , ) )
        return successors

    def __lowerCAmelCase ( self : List[Any] , A__ : Node | None ) -> list[TPosition]:
        """Walk parent links back to the root and return the path in order."""
        a__ : Union[str, Any] = node
        a__ : Optional[Any] = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            a__ : Any = current_node.parent
        path.reverse()
        return path
class lowerCAmelCase__ :
    """Bidirectional A*: runs a forward and a backward search and joins the
    two paths where the frontiers meet.

    NOTE(review): broken as written -- ``__init__`` repeats the parameter
    name ``A__`` (a SyntaxError), binds both sub-searches to ``a__`` locals
    instead of ``self`` attributes, and references ``AStar`` although the
    forward-search class above is named ``lowerCAmelCase__`` (which this
    very definition shadows).
    """
    def __init__( self : List[Any] , A__ : TPosition , A__ : TPosition ) -> None:
        # forward search (start -> goal) and backward search (goal -> start)
        a__ : str = AStar(A__ , A__ )
        a__ : Optional[int] = AStar(A__ , A__ )
        a__ : List[str] = False

    def __lowerCAmelCase ( self : Tuple ) -> list[TPosition]:
        """Advance both frontiers in lockstep until they meet; returns the
        joined path, or just the forward start position on failure."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            a__ : int = self.fwd_astar.open_nodes.pop(0 )
            a__ : List[Any] = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    A__ , A__ )
            self.fwd_astar.closed_nodes.append(A__ )
            self.bwd_astar.closed_nodes.append(A__ )
            # each search aims at the other search's current frontier node
            a__ : Tuple = current_bwd_node
            a__ : Optional[int] = current_fwd_node
            a__ : Optional[int] = {
                self.fwd_astar: self.fwd_astar.get_successors(A__ ),
                self.bwd_astar: self.bwd_astar.get_successors(A__ ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(A__ )
                    else:
                        # retrieve the best current path
                        a__ : Optional[Any] = astar.open_nodes.pop(
                            astar.open_nodes.index(A__ ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(A__ )
                        else:
                            astar.open_nodes.append(A__ )
        return [self.fwd_astar.start.pos]

    def __lowerCAmelCase ( self : List[str] , A__ : Node , A__ : Node ) -> list[TPosition]:
        """Join the forward path with the reversed backward path (dropping
        the duplicated meeting node)."""
        a__ : str = self.fwd_astar.retrace_path(A__ )
        a__ : List[str] = self.bwd_astar.retrace_path(A__ )
        bwd_path.pop()
        bwd_path.reverse()
        a__ : Optional[int] = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # NOTE(review): `grid` must be defined earlier in this module -- it is
    # read here but not visible in this chunk; confirm it exists.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    # The bidirectional variant is the class defined above in this module.
    bidir_a_star = lowerCAmelCase__(init, goal)
    bd_path = bidir_a_star.search()  # restored: original only timed construction
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 688 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    # sentencepiece is an optional dependency; without it the slow
    # tokenizer class is simply unavailable.
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

# Maximum input lengths (positional-embedding sizes) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = "▁"
class lowerCAmelCase__ ( PreTrainedTokenizerFast ):
    """
    Construct a "fast" FNet tokenizer, backed by HuggingFace's *tokenizers*
    library and a SentencePiece (ALBERT-style) vocabulary.
    """

    # Class attributes restored to the names the PreTrainedTokenizerFast
    # contract reads; the scrambled original assigned all of them to the
    # same throwaway name, shadowing one another.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word: it absorbs the space
        # before it so "[MASK]" matches in non-normalized text.
        # NOTE(review): lstrip/rstrip/normalized values assumed from the
        # upstream FNet tokenizer -- confirm against the slow tokenizer.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-created when the sentencepiece
        # model file is available.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add special tokens around one or two sequences:

        - single sequence: ``[CLS] X [SEP]``
        - pair of sequences: ``[CLS] A [SEP] B [SEP]``
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (including its
        special tokens), 1 for the optional second sequence."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into ``save_directory`` and
        return its path as a one-element tuple."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        # Only copy when saving to a different location than the source file.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 688 | 1 |
'''simple docstring'''
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first traversal used to build an Eulerian path.

    Marks each traversed edge (in both directions) as visited and returns
    the vertex sequence, starting at ``u``.
    """
    # Fix: the scrambled original assigned to a throwaway name instead of
    # `path` and dropped the edge-marking assignment entirely.
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # Undirected edge: mark both directions so it is never reused.
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """Classify the graph by counting odd-degree vertices.

    Returns ``(code, odd_node)`` where code 1 means Euler cycle (no odd
    vertices), 2 means Euler path (exactly two odd vertices, ``odd_node``
    being the last one seen -- a valid start vertex), and 3 means neither.
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    """Report whether the graph has an Euler cycle/path and print one.

    Prints a diagnostic message; when a traversal exists, prints the vertex
    sequence produced by :func:`dfs`.
    """
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        # An Euler path must start at one of the two odd-degree vertices.
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    """Run the Euler cycle/path check on a few sample graphs."""
    # Fix: the scrambled original assigned every graph to the same
    # throwaway name and called check_euler with undefined identifiers.
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
| 713 |
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
# theta_0 (bias) followed by one weight per feature; mutated in place by
# run_gradient_descent.
parameter_vector = [2, 4, 1, 5]
m = len(train_data)  # number of training examples
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Return hypothesis(example) - actual_output(example) for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )
def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis h(x) = theta_0 + sum_i theta_{i+1}*x_i.

    Reads the module-level ``parameter_vector`` (bias in slot 0, one weight
    per feature after it).
    """
    hyp_val = 0
    # Iterate over the weights (parameter_vector has bias + one weight per
    # feature); the scrambled original iterated len(input)-1 and silently
    # dropped the last feature.
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """Return the actual output value of example ``example_no``.

    ``data_set`` selects between the module-level train_data and test_data;
    any other value yields None.
    """
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    """Return the hypothesis value for example ``example_no``.

    ``data_set`` selects between the module-level train_data and test_data;
    any other value yields None.
    """
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum the cost-derivative contribution of the first ``end`` examples.

    ``index == -1`` corresponds to the bias term (plain error); otherwise
    each error is weighted by the example's feature at ``index``.
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    """Return the mean cost derivative for parameter ``index``.

    ``index == -1`` addresses the bias term (see
    summation_of_cost_derivative).
    """
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    """Batch gradient descent on the module-level ``parameter_vector``.

    Iterates until successive parameter vectors agree within the absolute
    tolerance, then prints the iteration count.
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            # i - 1 == -1 selects the bias term inside get_cost_derivative.
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    """Print actual vs. hypothesis output for every test example."""
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
# Script entry point: fit the parameters with gradient descent, then report
# hypothesis vs. actual outputs on the held-out test examples.
if __name__ == "__main__":
    run_gradient_descent()
    print('\nTesting gradient descent for a linear hypothesis function.\n')
    test_gradient_descent()
| 201 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.