"""Convert a Music Spectrogram Diffusion (T5X) checkpoint into a diffusers SpectrogramDiffusionPipeline."""
import argparse
import os

import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    # The left-hand attribute paths were destroyed in this copy; they are restored
    # here following the diffusers T5-style encoder module layout.
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_continuous_encoder(weights, model):
    # Same convention as load_notes_encoder: attribute paths restored from the
    # diffusers T5-style encoder layout.
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_decoder(weights, model):
    # Attribute paths restored from the diffusers T5FilmDecoder module layout.
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )

        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))

    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jax.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
A__ : str =argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
A__ : List[str] =parser.parse_args()
main(args)
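# Illustrative follow-up (added; not part of the original script): once saved,
# the converted pipeline can be reloaded with the standard diffusers API. The
# path below is a placeholder for whatever was passed as --output_path.
#
#   from diffusers import SpectrogramDiffusionPipeline
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("path/to/output")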
"""Tokenization classes for XLNet model."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
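# Note (added for clarity): unlike BERT, XLNet puts its special tokens at the
# *end* of the sequence. A single input is encoded as `tokens + [<sep>] + [<cls>]`
# and a pair as `tokens_0 + [<sep>] + tokens_1 + [<sep>] + [<cls>]`, with segment
# id 2 reserved for the trailing CLS token (see the methods above).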
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # attribute name assumed; only the value `False` survived in this copy
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
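# Note (added for clarity): `_LazyModule` replaces this module in `sys.modules`
# with a proxy that imports `modeling_xmod` only on first attribute access, so
# `import transformers` stays cheap even when torch is installed.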
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of the first ``num_of_terms`` terms of an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
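# Quick sanity sketch (added; not in the original file): the closed form
# n/2 * (2a + (n - 1)d) must agree with a brute-force sum of the progression.
def _check_sum_of_series() -> None:
    first_term, common_diff, num_of_terms = 1, 1, 10
    brute_force = sum(first_term + i * common_diff for i in range(num_of_terms))
    assert sum_of_series(first_term, common_diff, num_of_terms) == brute_force == 55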
"""Fine-tune FlaxBigBird for natural-questions style QA with an extra 5-way category head."""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an additional 5-way classification head on the pooled output."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        # one-hot encode the labels, then take the NLL of the target class
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
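# Minimal sanity sketch (added; not in the original script): for a single target,
# the one-hot construction above reduces to the negative log-probability of that
# class.
def _check_cross_entropy_one_hot() -> None:
    logits = jnp.array([[2.0, 0.5, -1.0]])
    labels = jnp.array([0])
    one_hot = (labels[..., None] == jnp.arange(logits.shape[-1])[None]).astype("f4")
    loss = -jnp.sum(one_hot * jax.nn.log_softmax(logits, axis=-1), axis=-1)
    assert jnp.allclose(loss[0], -jax.nn.log_softmax(logits, axis=-1)[0, 0])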
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_workers: int = 3  # field name assumed; only the default value survived in this copy
    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap , axis_name="batch" )
def lowerCAmelCase( a__ : List[Any] , a__ : Tuple , **a__ : Union[str, Any] ):
'''simple docstring'''
def loss_fn(a__ : str ):
lowerCamelCase__ = model_inputs.pop("start_labels" )
lowerCamelCase__ = model_inputs.pop("end_labels" )
lowerCamelCase__ = model_inputs.pop("pooled_labels" )
lowerCamelCase__ = state.apply_fn(**a__ , params=a__ , dropout_rng=a__ , train=a__ )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = outputs
return state.loss_fn(
a__ , a__ , a__ , a__ , a__ , a__ , )
lowerCamelCase__ , lowerCamelCase__ = jax.random.split(a__ )
lowerCamelCase__ = jax.value_and_grad(a__ )
lowerCamelCase__ , lowerCamelCase__ = grad_fn(state.params )
lowerCamelCase__ = jax.lax.pmean({"loss": loss} , axis_name="batch" )
lowerCamelCase__ = jax.lax.pmean(a__ , "batch" )
lowerCamelCase__ = state.apply_gradients(grads=a__ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def lowerCAmelCase( a__ : int , **a__ : Tuple ):
'''simple docstring'''
lowerCamelCase__ = model_inputs.pop("start_labels" )
lowerCamelCase__ = model_inputs.pop("end_labels" )
lowerCamelCase__ = model_inputs.pop("pooled_labels" )
lowerCamelCase__ = state.apply_fn(**a__ , params=state.params , train=a__ )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = outputs
lowerCamelCase__ = state.loss_fn(a__ , a__ , a__ , a__ , a__ , a__ )
lowerCamelCase__ = jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        flat_params = traverse_util.flatten_dict(params)
        # the mask is computed from the parameter *path*: no decay on biases / LayerNorm scales
        flat_mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat_params}
        return traverse_util.unflatten_dict(flat_mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
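# Illustrative check (added; not in the original script): the weight-decay mask
# excludes biases and LayerNorm scales, a common AdamW convention.
def _check_weight_decay_mask() -> None:
    toy = {"dense": {"kernel": 1.0, "bias": 2.0}, "LayerNorm": {"scale": 3.0}}
    flat = traverse_util.flatten_dict(toy)
    mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat}
    assert mask == {
        ("dense", "kernel"): True,
        ("dense", "bias"): False,
        ("LayerNorm", "scale"): False,
    }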
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_LayoutLM_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407,
        546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TextClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
@require_torch
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
import torch
__SCREAMING_SNAKE_CASE : Dict = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
__SCREAMING_SNAKE_CASE : List[str] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] )
@require_tf
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
__SCREAMING_SNAKE_CASE : List[Any] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] )
@slow
@require_torch
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = pipeline("""text-classification""" )
__SCREAMING_SNAKE_CASE : List[str] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__SCREAMING_SNAKE_CASE : int = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__SCREAMING_SNAKE_CASE : int = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"""label""": """POSITIVE""", """score""": 0.9_88}] )
@slow
@require_tf
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = pipeline("""text-classification""" , framework="""tf""" )
__SCREAMING_SNAKE_CASE : Dict = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__SCREAMING_SNAKE_CASE : Optional[int] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"""label""": """POSITIVE""", """score""": 0.9_88}] )
def UpperCamelCase__ ( self : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = TextClassificationPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
__SCREAMING_SNAKE_CASE : Dict = """HuggingFace is in"""
__SCREAMING_SNAKE_CASE : int = text_classifier(lowerCAmelCase__ )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"""label""": ANY(lowerCAmelCase__ ), """score""": ANY(lowerCAmelCase__ )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
__SCREAMING_SNAKE_CASE : List[Any] = ["""HuggingFace is in """, """Paris is in France"""]
__SCREAMING_SNAKE_CASE : int = text_classifier(lowerCAmelCase__ )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [{"""label""": ANY(lowerCAmelCase__ ), """score""": ANY(lowerCAmelCase__ )}, {"""label""": ANY(lowerCAmelCase__ ), """score""": ANY(lowerCAmelCase__ )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
__SCREAMING_SNAKE_CASE : Any = text_classifier(lowerCAmelCase__ , top_k=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [[{"""label""": ANY(lowerCAmelCase__ ), """score""": ANY(lowerCAmelCase__ )}] * N, [{"""label""": ANY(lowerCAmelCase__ ), """score""": ANY(lowerCAmelCase__ )}] * N] , )
__SCREAMING_SNAKE_CASE : Any = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
__SCREAMING_SNAKE_CASE : int = text_classifier(lowerCAmelCase__ )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {"""label""": ANY(lowerCAmelCase__ ), """score""": ANY(lowerCAmelCase__ )} , )
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
__SCREAMING_SNAKE_CASE : Dict = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(lowerCAmelCase__ ):
text_classifier(lowerCAmelCase__ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
__SCREAMING_SNAKE_CASE : List[Any] = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [{"""label""": ANY(lowerCAmelCase__ ), """score""": ANY(lowerCAmelCase__ )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 720
|
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
| 178
| 0
|
"""simple docstring"""
from __future__ import annotations


class TrieNode:
    def __init__(self):
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word into the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if the exact word is stored in the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete a word from the trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
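

# A small usage sketch (assumed helper, not part of the original module): prefix
# completion built on the same TrieNode API, walking down to the prefix node and
# then enumerating every leaf below it.
def words_with_prefix(root: TrieNode, prefix: str) -> list[str]:
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []  # no stored word starts with this prefix
        curr = curr.nodes[char]

    results: list[str] = []

    def collect(node: TrieNode, word: str) -> None:
        if node.is_leaf:
            results.append(word)
        for key, child in node.nodes.items():
            collect(child, word + key)

    collect(curr, prefix)
    return results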
| 102
|
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__magic_name__ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=_A , speech_processor=_A , vae=_A , text_encoder=_A , tokenizer=_A , unet=_A , scheduler=_A , feature_extractor=_A , )
def _a ( self , _A = "auto" ):
'''simple docstring'''
if slice_size == "auto":
UpperCamelCase : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_A )
def _a ( self ):
'''simple docstring'''
self.enable_attention_slicing(_A )
@torch.no_grad()
def __call__( self , _A , _A=1_6_0_0_0 , _A = 5_1_2 , _A = 5_1_2 , _A = 5_0 , _A = 7.5 , _A = None , _A = 1 , _A = 0.0 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , **_A , ):
'''simple docstring'''
UpperCamelCase : str = self.speech_processor.feature_extractor(
_A , return_tensors="""pt""" , sampling_rate=_A ).input_features.to(self.device )
UpperCamelCase : List[Any] = self.speech_model.generate(_A , max_length=4_8_0_0_0_0 )
UpperCamelCase : Optional[int] = self.speech_processor.tokenizer.batch_decode(_A , skip_special_tokens=_A , normalize=_A )[
0
]
if isinstance(_A , _A ):
UpperCamelCase : Tuple = 1
elif isinstance(_A , _A ):
UpperCamelCase : List[Any] = len(_A )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_A )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_A )}.""" )
# get prompt text embeddings
UpperCamelCase : Dict = self.tokenizer(
_A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCamelCase : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
UpperCamelCase : int = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase : List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = text_embeddings.shape
UpperCamelCase : Optional[int] = text_embeddings.repeat(1 , _A , 1 )
UpperCamelCase : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , _A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase : List[str]
if negative_prompt is None:
UpperCamelCase : str = [""""""] * batch_size
elif type(_A ) is not type(_A ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(_A )} !="""
f""" {type(_A )}.""" )
elif isinstance(_A , _A ):
UpperCamelCase : Tuple = [negative_prompt]
elif batch_size != len(_A ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(_A )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
UpperCamelCase : Any = negative_prompt
UpperCamelCase : Optional[int] = text_input_ids.shape[-1]
UpperCamelCase : List[str] = self.tokenizer(
_A , padding="""max_length""" , max_length=_A , truncation=_A , return_tensors="""pt""" , )
UpperCamelCase : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase : List[Any] = uncond_embeddings.shape[1]
UpperCamelCase : Dict = uncond_embeddings.repeat(1 , _A , 1 )
UpperCamelCase : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , _A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase : str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase : Tuple = torch.randn(_A , generator=_A , device="""cpu""" , dtype=_A ).to(
self.device )
else:
UpperCamelCase : Any = torch.randn(_A , generator=_A , device=self.device , dtype=_A )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
UpperCamelCase : Optional[Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase : Tuple = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase : str = {}
if accepts_eta:
UpperCamelCase : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : str = self.scheduler.scale_model_input(_A , _A )
# predict the noise residual
UpperCamelCase : Optional[Any] = self.unet(_A , _A , encoder_hidden_states=_A ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase : str = noise_pred.chunk(2 )
UpperCamelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase : Any = self.scheduler.step(_A , _A , _A , **_A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A , _A )
UpperCamelCase : Optional[Any] = 1 / 0.1_82_15 * latents
UpperCamelCase : Union[str, Any] = self.vae.decode(_A ).sample
UpperCamelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase : Optional[Any] = self.numpy_to_pil(_A )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_A , nsfw_content_detected=_A )
| 102
| 1
|
'''simple docstring'''
def sum_of_divisors(input_num: int) -> int:
    """
    Return the sum of the proper divisors of a positive integer.

    >>> sum_of_divisors(6)
    6
    >>> sum_of_divisors(12)
    16
    """
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
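
# Worked examples (our addition): a number is "perfect" exactly when it equals the
# sum of its proper divisors, so this function identifies perfect numbers directly.
#   sum_of_divisors(6)  -> 1 + 2 + 3           = 6   (perfect)
#   sum_of_divisors(28) -> 1 + 2 + 4 + 7 + 14  = 28  (perfect)
#   sum_of_divisors(10) -> 1 + 2 + 5           = 8   (not perfect)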
| 714
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 187
| 0
|
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take this denomination as long as it fits
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
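
# Caveat worth noting (our observation, not from the original script): the greedy
# strategy above is only optimal for "canonical" coin systems such as Indian
# currency, and it assumes the denominations are sorted in ascending order. For an
# arbitrary set it can return a non-minimal answer:
#   find_minimum_change([1, 3, 4], "6") -> [4, 1, 1]   (greedy, 3 coins)
# whereas [3, 3] makes the same change with 2 coins; non-canonical systems need
# dynamic programming instead.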
| 318
|
def triangle_number_generator():
    """Yield the triangle numbers t_n = n * (n + 1) / 2."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
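

# A faster variant (a sketch of ours, not the original solution): n and n + 1 are
# coprime, so the divisor count of t_n = n(n + 1)/2 factors as
# d(n/2) * d(n + 1) for even n and d(n) * d((n + 1)/2) for odd n, letting each
# count run over a number roughly half the size.
def solution_fast() -> int:
    n = 1
    while True:
        n += 1
        if n % 2 == 0:
            count = count_divisors(n // 2) * count_divisors(n + 1)
        else:
            count = count_divisors(n) * count_divisors((n + 1) // 2)
        if count > 500:
            return n * (n + 1) // 2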
| 318
| 1
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
a__ = open # noqa: we just need to have a builtin inside this module to test it properly
| 706
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
| 99
| 0
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 647
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 647
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
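
# Minimal usage sketch (ours; the values shown are exactly the defaults set above):
# instantiating the config yields the hyperparameters of the base microsoft/biogpt model.
if __name__ == "__main__":
    config = BioGptConfig()
    print(config.vocab_size, config.hidden_size, config.num_hidden_layers)  # 42384 1024 24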
| 620
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self , _A , _A=7 , _A=3 , _A=18 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=False , ):
SCREAMING_SNAKE_CASE_ = size if size is not None else {'height': 20, 'width': 20}
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = min_resolution
SCREAMING_SNAKE_CASE_ = max_resolution
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean
SCREAMING_SNAKE_CASE_ = image_std
SCREAMING_SNAKE_CASE_ = do_reduce_labels
def lowerCAmelCase__ ( self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def _UpperCAmelCase ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
SCREAMING_SNAKE_CASE_ = Image.open(dataset[0]['file'] )
SCREAMING_SNAKE_CASE_ = Image.open(dataset[1]['file'] )
return image, map
def _UpperCAmelCase ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
SCREAMING_SNAKE_CASE_ = Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE_ = Image.open(ds[1]['file'] )
SCREAMING_SNAKE_CASE_ = Image.open(ds[2]['file'] )
SCREAMING_SNAKE_CASE_ = Image.open(ds[3]['file'] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __snake_case ( lowerCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : Union[str, Any] = BeitImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = BeitImageProcessingTester(self)
@property
def lowerCAmelCase__ ( self):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_A , 'do_resize'))
self.assertTrue(hasattr(_A , 'size'))
self.assertTrue(hasattr(_A , 'do_center_crop'))
self.assertTrue(hasattr(_A , 'center_crop'))
self.assertTrue(hasattr(_A , 'do_normalize'))
self.assertTrue(hasattr(_A , 'image_mean'))
self.assertTrue(hasattr(_A , 'image_std'))
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 20, 'width': 20})
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18})
self.assertEqual(image_processor.do_reduce_labels , _A)
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A)
self.assertEqual(image_processor.size , {'height': 42, 'width': 42})
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84})
self.assertEqual(image_processor.do_reduce_labels , _A)
def lowerCAmelCase__ ( self):
pass
def lowerCAmelCase__ ( self):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A)
for image in image_inputs:
self.assertIsInstance(_A , Image.Image)
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCAmelCase__ ( self):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A)
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray)
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCAmelCase__ ( self):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A)
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor)
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCAmelCase__ ( self):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A)
SCREAMING_SNAKE_CASE_ = []
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , maps[0] , return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long)
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long)
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
# Test not batched input (PIL images)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long)
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
# Test batched input (PIL images)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_batch_inputs()
SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long)
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
def lowerCAmelCase__ ( self):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 150)
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = image_processing(_A , _A , return_tensors='pt')
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
| 620
| 1
|
def sylvester(number: int) -> int:
    """
    Return the n-th number in Sylvester's sequence:
    2, 3, 7, 43, 1807, ... where s(n) = s(n-1)^2 - s(n-1) + 1.
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
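
# Sanity check of the recurrence (our addition): s(n) = s(n-1)^2 - s(n-1) + 1,
# so the sequence starts 2, 3, 7, 43, 1807.
assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]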
| 496
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
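
# How the pattern above behaves (explanatory note of ours; the module path matches
# the `_import_structure` key): at import time the module body only records names,
# and the first attribute access triggers the real import, e.g.
#
#   from transformers.models.mluke import MLukeTokenizer  # resolved lazily
#
# Static type checkers instead follow the eager imports in the TYPE_CHECKING branch.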
| 496
| 1
|
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
| 700
|
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 104
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 64
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def A__ ( ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case_ , default=snake_case_ , required=snake_case_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=snake_case_ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
        '''-n''' , '''--images_num''' , type=snake_case_ , default=4 , help='''How many images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=snake_case_ , default=42 , help='''Seed for random process.''' , )
parser.add_argument(
        '''-ci''' , '''--cuda_id''' , type=snake_case_ , default=0 , help='''CUDA device id to use.''' , )
SCREAMING_SNAKE_CASE__: Any= parser.parse_args()
return args
def A__ ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : List[str] ):
if not len(snake_case_ ) == rows * cols:
        raise ValueError('''The specified number of rows and columns is not correct.''' )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= imgs[0].size
SCREAMING_SNAKE_CASE__: Optional[Any]= Image.new('''RGB''' , size=(cols * w, rows * h) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Union[str, Any]= grid.size
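    # each image occupies one w x h cell; image i is pasted at column i % cols, row i // cols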
for i, img in enumerate(snake_case_ ):
grid.paste(snake_case_ , box=(i % cols * w, i // cols * h) )
return grid
def A__ ( snake_case_ : Tuple , snake_case_ : str="robotic cat with wings" , snake_case_ : Optional[Any]=7.5 , snake_case_ : Dict=50 , snake_case_ : Union[str, Any]=1 , snake_case_ : Tuple=42 , ):
SCREAMING_SNAKE_CASE__: List[Any]= torch.Generator(pipeline.device ).manual_seed(snake_case_ )
SCREAMING_SNAKE_CASE__: Optional[int]= pipeline(
snake_case_ , guidance_scale=snake_case_ , num_inference_steps=snake_case_ , generator=snake_case_ , num_images_per_prompt=snake_case_ , ).images
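    # lay the outputs out in a near-square grid: rows = floor(sqrt(n)), cols = n // rows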
SCREAMING_SNAKE_CASE__: str= int(math.sqrt(snake_case_ ) )
SCREAMING_SNAKE_CASE__: Optional[Any]= image_grid(snake_case_ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
lowercase_ : List[str] = parse_args()
# Load models and create wrapper for stable diffusion
lowercase_ : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
lowercase_ : List[Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
lowercase_ : Tuple = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
lowercase_ : List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
lowercase_ : Dict = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
lowercase_ : str = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
lowercase_ : Union[str, Any] = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
lowercase_ : Any = unet.to(torch.device('cuda', args.cuda_id))
lowercase_ : str = pipeline.to(unet.device)
lowercase_ , lowercase_ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
lowercase_ : List[Any] = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 64
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : Tuple = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , ):
"""simple docstring"""
__UpperCAmelCase = {}
if train_file is not None:
__UpperCAmelCase = [train_file]
if eval_file is not None:
__UpperCAmelCase = [eval_file]
if test_file is not None:
__UpperCAmelCase = [test_file]
__UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=UpperCamelCase__ )
__UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() )
__UpperCAmelCase = features_name.pop(UpperCamelCase__ )
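    # after popping the label column, the remaining feature column(s) hold the text inputs (one column or a pair)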
__UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
__UpperCAmelCase = {label: i for i, label in enumerate(UpperCamelCase__ )}
__UpperCAmelCase = tokenizer.model_input_names
__UpperCAmelCase = {}
if len(UpperCamelCase__ ) == 1:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' ) , batched=UpperCamelCase__ , )
elif len(UpperCamelCase__ ) == 2:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , ) , batched=UpperCamelCase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
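    # wrap each split's generator (when the split exists) into a tf.data.Dataset of ({input_name: int32 ids}, int32 label) pairs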
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class A :
a_ = field(metadata={'''help''': '''Which column contains the label'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the training file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the development file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the test file'''} )
a_ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
a_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def lowerCAmelCase ( ):
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase__ ) , labelaid=UpperCamelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict:
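        # accuracy: share of argmax class predictions that match the reference labels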
__UpperCAmelCase = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__UpperCAmelCase = TFTrainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__UpperCAmelCase = trainer.evaluate()
__UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(UpperCamelCase__ )
return results
if __name__ == "__main__":
main()
| 654
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ : List[Any] = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Any = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__magic_name__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 102
|
"""simple docstring"""
from math import ceil, sqrt
def UpperCamelCase (SCREAMING_SNAKE_CASE = 100_0000 ):
UpperCamelCase : int = 0
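    # a hollow square lamina with outer width o and hole width h uses o**2 - h**2 tiles;
    # h must be at least 1 and share o's parity, so for each o we count the hole widths
    # whose tile count stays within the limit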
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
UpperCamelCase : Optional[Any] = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
UpperCamelCase : str = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f'''{solution() = }''')
| 102
| 1
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=10 , lowerCAmelCase_=3 , lowerCAmelCase_=2 , lowerCAmelCase_=2 , lowerCAmelCase_=2 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=10 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=0.9 , lowerCAmelCase_=None , ):
_lowercase =parent
_lowercase =batch_size
_lowercase =image_size
_lowercase =num_channels
_lowercase =patch_size
_lowercase =tubelet_size
_lowercase =num_frames
_lowercase =is_training
_lowercase =use_labels
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =type_sequence_label_size
_lowercase =initializer_range
_lowercase =mask_ratio
_lowercase =scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
_lowercase =(image_size // patch_size) ** 2
_lowercase =(num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
_lowercase =int(mask_ratio * self.seq_length )
def __lowerCAmelCase ( self ):
_lowercase =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_lowercase =None
if self.use_labels:
_lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase =self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase =VideoMAEModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_lowercase =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase =VideoMAEForPreTraining(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_lowercase =torch.ones((self.num_masks,) )
_lowercase =torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
_lowercase =mask.expand(self.batch_size , -1 ).bool()
_lowercase =model(lowerCAmelCase_ , lowerCAmelCase_ )
# model only returns predictions for masked patches
_lowercase =mask.sum().item()
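        # each reconstructed patch target covers tubelet_size * patch_size**2 pixels in 3 channels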
_lowercase =3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def __lowerCAmelCase ( self ):
_lowercase =self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase =config_and_inputs
_lowercase ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _a ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCAmelCase ( self ):
_lowercase =VideoMAEModelTester(self )
_lowercase =ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ):
_lowercase =copy.deepcopy(lowerCAmelCase_ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_lowercase =torch.ones((self.model_tester.num_masks,) )
_lowercase =torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_lowercase =mask.expand(self.model_tester.batch_size , -1 ).bool()
_lowercase =bool_masked_pos.to(lowerCAmelCase_ )
if return_labels:
if model_class in [
*get_values(lowerCAmelCase_ ),
]:
_lowercase =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def __lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def __lowerCAmelCase ( self ):
pass
def __lowerCAmelCase ( self ):
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowercase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self ):
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =model_class(lowerCAmelCase_ )
_lowercase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase =[*signature.parameters.keys()]
_lowercase =["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_ )
@slow
def __lowerCAmelCase ( self ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase =VideoMAEModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
if not self.has_attentions:
pass
else:
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
_lowercase =True
for model_class in self.all_model_classes:
_lowercase =self.model_tester.seq_length - self.model_tester.num_masks
_lowercase =(
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_lowercase =True
_lowercase =False
_lowercase =True
_lowercase =model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_lowercase =model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_lowercase =outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowercase =True
_lowercase =model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_lowercase =model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_lowercase =outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_lowercase =len(lowerCAmelCase_ )
# Check attention is always last and order is fine
_lowercase =True
_lowercase =True
_lowercase =model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_lowercase =model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase_ ) )
_lowercase =outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __lowerCAmelCase ( self ):
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase =model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_lowercase =model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_lowercase =outputs.hidden_states
_lowercase =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
_lowercase =self.model_tester.seq_length - self.model_tester.num_masks
_lowercase =num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase =True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __lowerCAmelCase ( self ):
pass
def __lowerCamelCase ( ) -> Tuple:
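    # download a short demo clip from the Hub and return it as a list of per-frame numpy arrays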
_lowercase =hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
_lowercase =np.load(__a )
return list(__a )
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ):
_lowercase =VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
lowerCAmelCase_ )
_lowercase =self.default_image_processor
_lowercase =prepare_video()
_lowercase =image_processor(lowerCAmelCase_ , return_tensors="pt" ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_lowercase =model(**lowerCAmelCase_ )
# verify the logits
_lowercase =torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_lowercase =torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def __lowerCAmelCase ( self ):
_lowercase =VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(lowerCAmelCase_ )
_lowercase =self.default_image_processor
_lowercase =prepare_video()
_lowercase =image_processor(lowerCAmelCase_ , return_tensors="pt" ).to(lowerCAmelCase_ )
# add boolean mask, indicating which patches to mask
_lowercase =hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
_lowercase =torch.load(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_lowercase =model(**lowerCAmelCase_ )
# verify the logits
_lowercase =torch.Size([1, 1408, 1536] )
_lowercase =torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=lowerCAmelCase_ )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_lowercase =torch.tensor([0.5_1_4_2] , device=lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase_ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_lowercase =VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=lowerCAmelCase_ ).to(
lowerCAmelCase_ )
with torch.no_grad():
_lowercase =model(**lowerCAmelCase_ )
        _lowercase =torch.tensor([0.6_4_6_9] , device=lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase_ , atol=1e-4 ) )
| 594
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _a :
"""simple docstring"""
__SCREAMING_SNAKE_CASE = None
def __lowerCAmelCase ( self ):
_lowercase =self.feature_extraction_class(**self.feat_extract_dict )
_lowercase =json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =os.path.join(lowerCAmelCase_ , "feat_extract.json" )
feat_extract_first.to_json_file(lowerCAmelCase_ )
_lowercase =self.feature_extraction_class.from_json_file(lowerCAmelCase_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __lowerCAmelCase ( self ):
_lowercase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =feat_extract_first.save_pretrained(lowerCAmelCase_ )[0]
check_json_file_has_correct_format(lowerCAmelCase_ )
_lowercase =self.feature_extraction_class.from_pretrained(lowerCAmelCase_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __lowerCAmelCase ( self ):
_lowercase =self.feature_extraction_class()
self.assertIsNotNone(lowerCAmelCase_ )
| 594
| 1
|
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
lowercase_ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc.).
lowercase_ = [0, 25, 50]
lowercase_ = [25, 50, 75]
lowercase_ = fuzz.membership.trimf(X, abca)
lowercase_ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
lowercase_ = np.ones(75)
lowercase_ = np.zeros((75,))
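    # constant membership grades, used below to clamp the bounded sum and bounded difference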
# 1. Union = max(µA(x), µB(x))
lowercase_ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
lowercase_ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A') = 1 - µA(x)
lowercase_ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
lowercase_ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
lowercase_ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
lowercase_ = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
lowercase_ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
lowercase_ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
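    # Neither composition is computed in this script; a minimal numpy sketch (hypothetical
    # helper names, r1 of shape (m, n) and r2 of shape (n, p)) would be:
    # def max_min_composition(r1, r2):
    #     # composed[i, j] = max over k of min(r1[i, k], r2[k, j])
    #     return np.max(np.minimum(r1[:, :, None], r2[None, :, :]), axis=1)
    # def max_product_composition(r1, r2):
    #     # composed[i, j] = max over k of r1[i, k] * r2[k, j]
    #     return np.max(r1[:, :, None] * r2[None, :, :], axis=1)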
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 11
|
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
lowercase_ = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class __A :
'''simple docstring'''
def __init__(self , A = 14 ) -> None:
"""simple docstring"""
if group not in primes:
raise ValueError('''Unsupported Group''' )
_a = primes[group]['''prime''']
_a = primes[group]['''generator''']
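        # the private exponent is 32 random bytes (256 bits) from the OS CSPRNG, stored as an int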
_a = int(hexlify(urandom(32 ) ) , base=16 )
def a__ (self ) -> str:
"""simple docstring"""
return hex(self.__private_key )[2:]
def a__ (self ) -> str:
"""simple docstring"""
_a = pow(self.generator , self.__private_key , self.prime )
return hex(A )[2:]
def a__ (self , A ) -> bool:
"""simple docstring"""
return (
2 <= key <= self.prime - 2
and pow(A , (self.prime - 1) // 2 , self.prime ) == 1
)
def a__ (self , A ) -> str:
"""simple docstring"""
_a = int(A , base=16 )
if not self.is_valid_public_key(A ):
raise ValueError('''Invalid public key''' )
_a = pow(A , self.__private_key , self.prime )
return shaaaa(str(A ).encode() ).hexdigest()
@staticmethod
def a__ (A , A ) -> bool:
"""simple docstring"""
return (
2 <= remote_public_key_str <= prime - 2
and pow(A , (prime - 1) // 2 , A ) == 1
)
@staticmethod
def a__ (A , A , A = 14 ) -> str:
"""simple docstring"""
_a = int(A , base=16 )
_a = int(A , base=16 )
_a = primes[group]['''prime''']
if not DiffieHellman.is_valid_public_key_static(A , A ):
raise ValueError('''Invalid public key''' )
_a = pow(A , A , A )
return shaaaa(str(A ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11
| 1
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__lowerCAmelCase : Any = logging.get_logger(__name__)
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Any , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : str ) -> None:
"""simple docstring"""
warnings.warn(
"""The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use GLPNImageProcessor instead.""" , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 76
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = botoa.client("""iam""" )
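    # trust policy that lets the SageMaker service principal assume the new role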
__magic_name__ = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) )
__magic_name__ = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'''role {role_name} already exists. Using existing one''' )
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = botoa.client("""iam""" )
return iam_client.get_role(RoleName=A_ )["Role"]["Arn"]
def a__ ( ):
'''simple docstring'''
__magic_name__ = _ask_options(
"""How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, )
__magic_name__ = None
if credentials_configuration == 0:
__magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" )
__magic_name__ = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
__magic_name__ = _ask_field("""AWS Access Key ID: """ )
__magic_name__ = aws_access_key_id
__magic_name__ = _ask_field("""AWS Secret Access Key: """ )
__magic_name__ = aws_secret_access_key
__magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" )
__magic_name__ = aws_region
__magic_name__ = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, )
if role_management == 0:
__magic_name__ = _ask_field("""Enter your IAM role name: """ )
else:
__magic_name__ = """accelerate_sagemaker_execution_role"""
print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(A_ )
__magic_name__ = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
__magic_name__ = None
if is_custom_docker_image:
__magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() )
__magic_name__ = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
__magic_name__ = None
if is_sagemaker_inputs_enabled:
__magic_name__ = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), )
__magic_name__ = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
__magic_name__ = None
if is_sagemaker_metrics_enabled:
__magic_name__ = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), )
__magic_name__ = _ask_options(
"""What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, )
__magic_name__ = {}
__magic_name__ = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
if use_dynamo:
__magic_name__ = """dynamo_"""
__magic_name__ = _ask_options(
"""Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, )
__magic_name__ = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
if use_custom_options:
__magic_name__ = _ask_options(
"""Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", )
__magic_name__ = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
__magic_name__ = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
__magic_name__ = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
__magic_name__ = _ask_options(
A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" )
__magic_name__ = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__magic_name__ = _ask_field(
"""How many machines do you want use? [1]: """, A_, default=1, )
__magic_name__ = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
| 76
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _UpperCAmelCase:
def __init__( self , __a , __a=2 , __a=32 , __a=16 , __a=3 , __a=True , __a=True , __a=32 , __a=4 , __a=[0, 1, 2, 3] , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.02 , __a=3 , __a=[1, 3_84, 24, 24] , __a=True , __a=None , ) -> int:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = backbone_out_indices
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = backbone_featmap_shape
_UpperCamelCase = scope
_UpperCamelCase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase = (image_size // patch_size) ** 2
_UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 1_92, 3_84, 7_68],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__a , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = DPTModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = DPTForDepthEstimation(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size))
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = DPTForSemanticSegmentation(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , labels=__a)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowercase__ = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = DPTModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''')
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
pass
    def test_model_common_attributes( self ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear) )
    def test_forward_signature( self ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_depth_estimation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training( self ):
        '''simple docstring'''
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        '''simple docstring'''
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_initialization( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [F'''{name}.{key}''' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small( self ):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_raise_readout_type( self ):
        '''simple docstring'''
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = '''add'''
        with self.assertRaises(ValueError ):
            _ = DPTForDepthEstimation(config )
def lowerCamelCase__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest( unittest.TestCase ):
    def test_inference_depth_estimation( self ):
        '''simple docstring'''
        image_processor = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
        model = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1e-4 ) )
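# Illustrative post-processing (added; not part of the original test): DPT's raw depth map
# is usually resized back to the input resolution before visualization. A minimal sketch,
# assuming a PIL `image` and the `predicted_depth` tensor produced above; the helper name
# is hypothetical.
def resize_depth_to_image(predicted_depth, image):
    # (batch, H, W) -> (batch, 1, H, W) so bicubic interpolation sees a channel dimension;
    # PIL's image.size is (width, height), so reverse it for torch's (H, W) convention.
    return torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
    ).squeeze()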
| 19
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
        return inputs_dict
    class TFMobileBertModelTester(object ):
        def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , embedding_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs( self ):
            input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length] )
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
                token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
                choice_labels = ids_tensor([self.batch_size] , self.num_choices )
            config = MobileBertConfig(
                vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertModel(config=config )
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs )
            inputs = [input_ids, input_mask]
            result = model(inputs )
            result = model(input_ids )
            self.parent.assertEqual(
                result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
            self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
        def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertForMaskedLM(config=config )
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertForNextSentencePrediction(config=config )
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
        def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertForPreTraining(config=config )
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(
                result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
            self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
        def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config )
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config )
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
        def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config )
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
        def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertForQuestionAnswering(config=config )
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
            self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        def prepare_config_and_inputs_for_common( self ):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp( self ):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_mobilebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFMobileBertModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_masked_lm( self ):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 633
| 0
|
"""simple docstring"""
def solution( limit = 1_000 ):
    return sum(e for e in range(3 , limit ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 711
|
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ):
A = "UNwant\u00E9d,running"
A = "unwanted, running"
return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_chinese( self ):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
    def test_basic_tokenizer_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def test_basic_tokenizer_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
    def test_basic_tokenizer_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def test_basic_tokenizer_lower_strip_accents_default( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def test_basic_tokenizer_no_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
    def test_basic_tokenizer_respects_never_split_tokens( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=["[UNK]"] )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
    def test_wordpiece_tokenizer( self ):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
    def test_prepare_batch( self ):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_ids = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text , padding=True , return_tensors="pt" )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_ids , result )
        self.assertEqual((2, 9) , batch.input_ids.shape )
        self.assertEqual((2, 9) , batch.attention_mask.shape )
    def test_is_whitespace( self ):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control( self ):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation( self ):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_2 = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 524
| 0
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 332
|
def binomial_coefficient( n , r ):
    '''simple docstring'''
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]

print(binomial_coefficient(n=10, r=5))
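# Quick checks (added): C(10, 5) is 252, and the in-place row update above encodes
# Pascal's rule C(n, r) = C(n - 1, r - 1) + C(n - 1, r).
assert binomial_coefficient(n=10, r=5) == 252
assert binomial_coefficient(n=9, r=4) + binomial_coefficient(n=9, r=5) == 252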
| 569
| 0
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 25),)
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
            '''solver_type''': '''bh2''',
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual , t , output , **kwargs ).prev_sample
                new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        """simple docstring"""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , '''set_timesteps''' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , '''set_timesteps''' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_switch( self ):
        """simple docstring"""
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2464 ) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2464 ) < 1e-3
    def test_timesteps( self ):
        """simple docstring"""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_thresholding( self ):
        """simple docstring"""
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , solver_order=order , solver_type=solver_type , )
    def test_prediction_type( self ):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_solver_order_and_type( self ):
        """simple docstring"""
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    sample = self.full_loop(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def test_lower_order_final( self ):
        """simple docstring"""
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def test_inference_steps( self ):
        """simple docstring"""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def test_full_loop_no_noise( self ):
        """simple docstring"""
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2464 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        """simple docstring"""
        sample = self.full_loop(prediction_type='''v_prediction''' )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1014 ) < 1e-3
    def test_fp16_support( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
    def test_unique_timesteps( self , **config ):
        """simple docstring"""
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 715
|
"""simple docstring"""
from __future__ import annotations
def allocation_num( number_of_bytes : int , partitions : int ) -> list[str]:
    if partitions <= 0:
        raise ValueError('''partitions must be a positive number!''' )
    if partitions > number_of_bytes:
        raise ValueError('''partitions can not > number_of_bytes!''' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
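# Examples (added): the ranges are inclusive and 1-based, and the final partition absorbs
# any remainder left over from the integer division.
assert allocation_num(100, 4) == ["1-25", "26-50", "51-75", "76-100"]
assert allocation_num(10, 3) == ["1-3", "4-6", "7-10"]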
| 497
| 0
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url( repo_id , path , revision = None ):
    '''simple docstring'''
    if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type='''dataset''' , revision=revision )
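# Usage sketch (added; the repo and file names are illustrative only):
# hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet", revision="refs/convert/parquet")
# builds a resolvable URL for a file inside a dataset repository on the Hub.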
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_jukebox'''] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 309
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"""help""": """The output directory where the model will be written."""} , )
    encoder_model_name_or_path: str = field(
        metadata={
            """help""": (
                """The encoder model checkpoint for weights initialization."""
                """Don't set if you want to train an encoder model from scratch."""
            )
        } , )
    decoder_model_name_or_path: str = field(
        metadata={
            """help""": (
                """The decoder model checkpoint for weights initialization."""
                """Don't set if you want to train a decoder model from scratch."""
            )
        } , )
    encoder_config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
    decoder_config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments,) )
    (model_args ,) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer
    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
    model.save_pretrained(model_args.output_dir )
    image_processor.save_pretrained(model_args.output_dir )
    tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
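# Example invocation (added; the checkpoint names are illustrative):
# python create_model_from_encoder_decoder_models.py \
#     --output_dir ./vit-gpt2 \
#     --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#     --decoder_model_name_or_path gpt2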
| 91
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : int ):
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=_lowercase , )
assert hasattr(self , 'env' )
def __a ( self : Optional[int] , _lowercase : int ):
A = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
A = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_lowercase , instance_count=_lowercase , instance_type=self.instance_type , debugger_hook_config=_lowercase , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_lowercase , py_version='py36' , )
def __a ( self : Tuple , _lowercase : List[str] ):
TrainingJobAnalytics(_lowercase ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def __a ( self : List[Any] , _lowercase : Union[str, Any] ):
# create estimator
A = self.create_estimator(_lowercase )
# run training
estimator.fit()
# result dataframe
A = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
A = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
A = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
A = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , _lowercase )
| 91
| 1
|
"""simple docstring"""
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE : Tuple = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check( pkg , hint=None ):
    require_version(deps[pkg] , hint )
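# Example (added): callers can validate a single pinned dependency on demand, e.g.
# dep_version_check("tokenizers") raises if the installed version violates the pin in
# dependency_versions_table.py and is a no-op otherwise.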
| 661
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments( path : str , n_shave_prefix_segments : int=1 ) -> str:
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def renew_resnet_paths( old_list , n_shave_prefix_segments=0 ):
'''simple docstring'''
__lowerCAmelCase = []
for old_item in old_list:
__lowerCAmelCase = old_item.replace("""in_layers.0""" , """norm1""" )
__lowerCAmelCase = new_item.replace("""in_layers.2""" , """conv1""" )
__lowerCAmelCase = new_item.replace("""out_layers.0""" , """norm2""" )
__lowerCAmelCase = new_item.replace("""out_layers.3""" , """conv2""" )
__lowerCAmelCase = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
__lowerCAmelCase = new_item.replace("""skip_connection""" , """conv_shortcut""" )
__lowerCAmelCase = shave_segments(snake_case_ , n_shave_prefix_segments=snake_case_ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def renew_attention_paths( old_list , n_shave_prefix_segments=0 ):
'''simple docstring'''
__lowerCAmelCase = []
for old_item in old_list:
__lowerCAmelCase = old_item
__lowerCAmelCase = new_item.replace("""norm.weight""" , """group_norm.weight""" )
__lowerCAmelCase = new_item.replace("""norm.bias""" , """group_norm.bias""" )
__lowerCAmelCase = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
__lowerCAmelCase = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
__lowerCAmelCase = shave_segments(snake_case_ , n_shave_prefix_segments=snake_case_ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def assign_to_checkpoint( paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
__lowerCAmelCase = old_checkpoint[path]
__lowerCAmelCase = old_tensor.shape[0] // 3
__lowerCAmelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
__lowerCAmelCase = old_tensor.shape[0] // config["""num_head_channels"""] // 3
__lowerCAmelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = old_tensor.split(channels // num_heads , dim=1 )
__lowerCAmelCase = query.reshape(snake_case_ )
__lowerCAmelCase = key.reshape(snake_case_ )
__lowerCAmelCase = value.reshape(snake_case_ )
for path in paths:
__lowerCAmelCase = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
__lowerCAmelCase = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
__lowerCAmelCase = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
__lowerCAmelCase = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
__lowerCAmelCase = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
__lowerCAmelCase = old_checkpoint[path["""old"""]][:, :, 0]
else:
__lowerCAmelCase = old_checkpoint[path["""old"""]]
def convert_ldm_checkpoint( checkpoint , config ):
'''simple docstring'''
__lowerCAmelCase = {}
__lowerCAmelCase = checkpoint["""time_embed.0.weight"""]
__lowerCAmelCase = checkpoint["""time_embed.0.bias"""]
__lowerCAmelCase = checkpoint["""time_embed.2.weight"""]
__lowerCAmelCase = checkpoint["""time_embed.2.bias"""]
__lowerCAmelCase = checkpoint["""input_blocks.0.0.weight"""]
__lowerCAmelCase = checkpoint["""input_blocks.0.0.bias"""]
__lowerCAmelCase = checkpoint["""out.0.weight"""]
__lowerCAmelCase = checkpoint["""out.0.bias"""]
__lowerCAmelCase = checkpoint["""out.2.weight"""]
__lowerCAmelCase = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
__lowerCAmelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
__lowerCAmelCase = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(snake_case_ )
}
# Retrieves the keys for the middle blocks only
__lowerCAmelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
__lowerCAmelCase = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(snake_case_ )
}
# Retrieves the keys for the output blocks only
__lowerCAmelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
__lowerCAmelCase = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(snake_case_ )
}
for i in range(1 , snake_case_ ):
__lowerCAmelCase = (i - 1) // (config["""num_res_blocks"""] + 1)
__lowerCAmelCase = (i - 1) % (config["""num_res_blocks"""] + 1)
__lowerCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
__lowerCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
__lowerCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
__lowerCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
__lowerCAmelCase = renew_resnet_paths(snake_case_ )
__lowerCAmelCase = {"""old""": f"""input_blocks.{i}.0""", """new""": f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
__lowerCAmelCase = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path, resnet_op] , config=snake_case_ )
if len(snake_case_ ):
__lowerCAmelCase = renew_attention_paths(snake_case_ )
__lowerCAmelCase = {
"""old""": f"""input_blocks.{i}.1""",
"""new""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
__lowerCAmelCase = {
f"""input_blocks.{i}.1.qkv.bias""": {
"""key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"""query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"""value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
"""key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"""query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"""value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case_ , config=snake_case_ , )
__lowerCAmelCase = middle_blocks[0]
__lowerCAmelCase = middle_blocks[1]
__lowerCAmelCase = middle_blocks[2]
__lowerCAmelCase = renew_resnet_paths(snake_case_ )
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , config=snake_case_ )
__lowerCAmelCase = renew_resnet_paths(snake_case_ )
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , config=snake_case_ )
__lowerCAmelCase = renew_attention_paths(snake_case_ )
__lowerCAmelCase = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , attention_paths_to_split=snake_case_ , config=snake_case_ )
for i in range(snake_case_ ):
__lowerCAmelCase = i // (config["""num_res_blocks"""] + 1)
__lowerCAmelCase = i % (config["""num_res_blocks"""] + 1)
__lowerCAmelCase = [shave_segments(snake_case_ , 2 ) for name in output_blocks[i]]
__lowerCAmelCase = {}
for layer in output_block_layers:
__lowerCAmelCase , __lowerCAmelCase = layer.split(""".""" )[0], shave_segments(snake_case_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case_ )
else:
__lowerCAmelCase = [layer_name]
if len(snake_case_ ) > 1:
__lowerCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
__lowerCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
__lowerCAmelCase = renew_resnet_paths(snake_case_ )
__lowerCAmelCase = renew_resnet_paths(snake_case_ )
__lowerCAmelCase = {"""old""": f"""output_blocks.{i}.0""", """new""": f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
__lowerCAmelCase = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
__lowerCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
__lowerCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(snake_case_ ) == 2:
__lowerCAmelCase = []
if len(snake_case_ ):
__lowerCAmelCase = renew_attention_paths(snake_case_ )
__lowerCAmelCase = {
"""old""": f"""output_blocks.{i}.1""",
"""new""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
__lowerCAmelCase = {
f"""output_blocks.{i}.1.qkv.bias""": {
"""key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"""query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"""value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
"""key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"""query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"""value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case_ , )
else:
__lowerCAmelCase = renew_resnet_paths(snake_case_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
__lowerCAmelCase = """.""".join(["""output_blocks""", str(snake_case_ ), path["""old"""]] )
__lowerCAmelCase = """.""".join(["""up_blocks""", str(snake_case_ ), """resnets""", str(snake_case_ ), path["""new"""]] )
__lowerCAmelCase = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
_A : Tuple = parser.parse_args()
_A : Optional[int] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_A : Optional[int] = json.loads(f.read())
_A : Union[str, Any] = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_A : Dict = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_A : Any = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
_A : Optional[Any] = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
_A : List[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 427
| 0
|
'''simple docstring'''
def binary_multiply( a , b ):
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply( a , b , c ):
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
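# Quick checks (added): the doubling loops agree with Python's built-in operators.
assert binary_multiply(123, 456) == 123 * 456
assert binary_mod_multiply(123, 456, 67) == (123 * 456) % 67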
| 474
|
'''simple docstring'''
from manim import *
class a__ ( __A ):
"""simple docstring"""
def _snake_case (self ):
__lowerCAmelCase = Rectangle(height=0.5 , width=0.5 )
__lowerCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__lowerCAmelCase = [mem.copy() for i in range(6 )]
__lowerCAmelCase = [mem.copy() for i in range(6 )]
__lowerCAmelCase = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__lowerCAmelCase = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__lowerCAmelCase = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
__lowerCAmelCase = Text('''CPU''' , font_size=24 )
__lowerCAmelCase = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
__lowerCAmelCase = [mem.copy() for i in range(1 )]
__lowerCAmelCase = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__lowerCAmelCase = Text('''GPU''' , font_size=24 )
__lowerCAmelCase = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.align_to(__lowercase , __lowercase )
gpu.set_x(gpu.get_x() - 1 )
self.add(__lowercase )
__lowerCAmelCase = [mem.copy() for i in range(6 )]
__lowerCAmelCase = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__lowerCAmelCase = Text('''Model''' , font_size=24 )
__lowerCAmelCase = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.play(
Create(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) , )
__lowerCAmelCase = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
__lowerCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowerCAmelCase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=2.5 ) , Write(__lowercase ) , Write(__lowercase ) )
self.add(__lowercase )
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = []
for i, rect in enumerate(__lowercase ):
__lowerCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
cpu_target.move_to(__lowercase )
cpu_target.generate_target()
__lowerCAmelCase = 0.4_6 / 4
__lowerCAmelCase = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__lowercase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=__lowercase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=__lowercase , buff=0.0 )
cpu_targs.append(__lowercase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__lowercase ) )
second_animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
self.play(*__lowercase )
self.play(*__lowercase )
self.wait()
| 474
| 1
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class _SCREAMING_SNAKE_CASE:
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=13 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=24 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=5 ,SCREAMING_SNAKE_CASE__=4 ,SCREAMING_SNAKE_CASE__=37 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=10 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=2 ,) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = parent
__SCREAMING_SNAKE_CASE :Any = batch_size
__SCREAMING_SNAKE_CASE :Dict = patch_size
__SCREAMING_SNAKE_CASE :List[Any] = max_length
__SCREAMING_SNAKE_CASE :str = num_mel_bins
__SCREAMING_SNAKE_CASE :Optional[Any] = is_training
__SCREAMING_SNAKE_CASE :Optional[int] = use_labels
__SCREAMING_SNAKE_CASE :List[Any] = hidden_size
__SCREAMING_SNAKE_CASE :str = num_hidden_layers
__SCREAMING_SNAKE_CASE :Any = num_attention_heads
__SCREAMING_SNAKE_CASE :Union[str, Any] = intermediate_size
__SCREAMING_SNAKE_CASE :List[str] = hidden_act
__SCREAMING_SNAKE_CASE :str = hidden_dropout_prob
__SCREAMING_SNAKE_CASE :Tuple = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE :List[Any] = type_sequence_label_size
__SCREAMING_SNAKE_CASE :Any = initializer_range
__SCREAMING_SNAKE_CASE :str = scope
__SCREAMING_SNAKE_CASE :List[str] = frequency_stride
__SCREAMING_SNAKE_CASE :Union[str, Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__SCREAMING_SNAKE_CASE :List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
__SCREAMING_SNAKE_CASE :List[str] = (self.max_length - self.patch_size) // self.time_stride + 1
__SCREAMING_SNAKE_CASE :Tuple = frequency_out_dimension * time_out_dimension
__SCREAMING_SNAKE_CASE :List[str] = num_patches + 2
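        # Worked example with the defaults above (num_mel_bins=16, max_length=24,
        # patch_size=2, both strides=2): frequency_out_dimension = (16 - 2) // 2 + 1 = 8,
        # time_out_dimension = (24 - 2) // 2 + 1 = 12, so num_patches = 8 * 12 = 96
        # and seq_length = 98.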
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
__SCREAMING_SNAKE_CASE :List[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE :str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE :List[str] = self.get_config()
return config, input_values, labels
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size ,max_length=self.max_length ,num_mel_bins=self.num_mel_bins ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,frequency_stride=self.frequency_stride ,time_stride=self.time_stride ,)
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = ASTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE :Dict = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE( A__ , A__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Tuple = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = ASTModelTester(self )
__SCREAMING_SNAKE_CASE :Any = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ,hidden_size=37 )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE :Any = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
__SCREAMING_SNAKE_CASE :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ ,nn.Linear ) )
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE :Dict = model_class(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE :Optional[int] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE :Optional[Any] = ["input_values"]
self.assertListEqual(arg_names[:1] ,lowerCamelCase__ )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@slow
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = ASTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __lowerCamelCase ( ) -> Any:
__SCREAMING_SNAKE_CASE :Optional[int] = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
__SCREAMING_SNAKE_CASE :List[str] = torchaudio.load(__a )
return audio, sampling_rate
@require_torch
@require_torchaudio
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = self.default_feature_extractor
__SCREAMING_SNAKE_CASE :Optional[Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :Any = self.default_feature_extractor
__SCREAMING_SNAKE_CASE :Dict = prepare_audio()
__SCREAMING_SNAKE_CASE :str = audio.squeeze().numpy()
__SCREAMING_SNAKE_CASE :Any = feature_extractor(lowerCamelCase__ ,sampling_rate=lowerCamelCase__ ,return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE :Any = model(**lowerCamelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE :Union[str, Any] = torch.Size((1, 5_27) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :List[str] = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase__ ,atol=1E-4 ) )
| 498
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Dict = logging.get_logger(__name__)
def rename_key(key):
    """Rewrite "<name>.<digit>" segments as "<name>_<digit>" to match Flax parameter naming."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
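# Quick illustration of rename_key (hypothetical key): the regex only matches
# "<word>.<digits>" segments, so list indices get underscored while ordinary
# dotted attributes are left alone.
assert rename_key("encoder.blocks.0.weight") == "encoder.blocks_0.weight"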
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key to its Flax equivalent and reshape the tensor if needed."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
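# Shape sketch for the 4-D conv transpose in rename_key_and_reshape_tensor
# (sizes are hypothetical): PyTorch stores conv weights as (out_ch, in_ch, H, W),
# while Flax expects kernels as (H, W, in_ch, out_ch).
pt_conv = jnp.zeros((8, 3, 5, 5))
flax_kernel = pt_conv.transpose(2, 3, 1, 0)
assert flax_kernel.shape == (5, 5, 3, 8)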
| 37
| 0
|
from __future__ import annotations
def min_path_cost(matrix):
    """Return the minimal path cost from the top-left to the bottom-right cell, moving only right or down."""
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(__UpperCamelCase ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(__UpperCamelCase ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
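# Usage sketch for min_path_cost (grid values are hypothetical). Moving only
# right or down, the cheapest route is 1 -> 3 -> 1 -> 1 -> 1, costing 7:
assert min_path_cost([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7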
| 721
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A : Optional[Any] = 16
A : Optional[Any] = 32
def a__ ( __UpperCamelCase , __UpperCamelCase = 1_6 ):
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE_ = load_dataset("glue" , "mrpc" )
def tokenize_function(__UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_ = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want to pad to round multiples of 16 (fp8) or 8 (fp16/bf16)
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE_ = 1_6
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE_ = 8
else:
SCREAMING_SNAKE_CASE_ = None
return tokenizer.pad(
__UpperCamelCase , padding="longest" , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors="pt" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ = DataLoader(
tokenized_datasets["train"] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = DataLoader(
tokenized_datasets["validation"] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A : Tuple = mocked_dataloaders # noqa: F811
def a__ ( __UpperCamelCase , __UpperCamelCase ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , __UpperCamelCase ) == "1":
SCREAMING_SNAKE_CASE_ = 2
# New Code #
SCREAMING_SNAKE_CASE_ = int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__UpperCamelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_ = config["lr"]
SCREAMING_SNAKE_CASE_ = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE_ = int(config["seed"] )
SCREAMING_SNAKE_CASE_ = int(config["batch_size"] )
SCREAMING_SNAKE_CASE_ = evaluate.load("glue" , "mrpc" )
set_seed(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE_ = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_ = AdamW(params=model.parameters() , lr=__UpperCamelCase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE_ = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=1_0_0 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = model(**__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = output.loss
accelerator.backward(__UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
SCREAMING_SNAKE_CASE_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __UpperCamelCase )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=__UpperCamelCase , default=__UpperCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=__UpperCamelCase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {"lr": 2E-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
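# Typical launch command for this script (the file name is illustrative, not
# from the source):
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16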
| 356
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Dict ) -> Any:
'''simple docstring'''
a__ : Dict = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
a__ : List[Any] = dict(zip(A__ , range(len(A__ ) ) ) )
a__ : List[Any] = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
a__ : Dict = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_6_0_0_0,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
a__ : Dict = tempfile.mkdtemp()
a__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a__ : Optional[Any] = os.path.join(self.tmpdirname , A__ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A__ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A__ ) + '''\n''' )
# load decoder from hub
a__ : List[str] = '''hf-internal-testing/ngram-beam-search-decoder'''
def __lowerCAmelCase ( self : Optional[int] , **A__ : Any ) -> List[Any]:
'''simple docstring'''
a__ : List[str] = self.add_kwargs_tokens_map.copy()
kwargs.update(A__ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A__ )
def __lowerCAmelCase ( self : str , **A__ : Tuple ) -> int:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A__ )
def __lowerCAmelCase ( self : Any , **A__ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A__ )
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
a__ : Optional[Any] = self.get_tokenizer()
a__ : Dict = self.get_feature_extractor()
a__ : Optional[Any] = self.get_decoder()
a__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
processor.save_pretrained(self.tmpdirname )
a__ : Any = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A__ )
def __lowerCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
a__ : Optional[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # load the processor with additional decoder kwargs and check that they are applied
a__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A__ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __lowerCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
a__ : Optional[int] = self.get_feature_extractor()
a__ : Any = self.get_tokenizer()
a__ : Tuple = self.get_decoder()
a__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
a__ : List[str] = floats_list((3, 1_0_0_0) )
a__ : List[str] = feature_extractor(A__ , return_tensors='''np''' )
a__ : Dict = processor(A__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[Any] = self.get_feature_extractor()
a__ : List[Any] = self.get_tokenizer()
a__ : Any = self.get_decoder()
a__ : int = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
a__ : Optional[Any] = '''This is a test string'''
a__ : Optional[int] = processor(text=A__ )
a__ : Optional[int] = tokenizer(A__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self : List[Any] , A__ : int=(2, 1_0, 1_6) , A__ : Union[str, Any]=7_7 ) -> int:
'''simple docstring'''
np.random.seed(A__ )
return np.random.rand(*A__ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[int] = self.get_feature_extractor()
a__ : int = self.get_tokenizer()
a__ : Optional[int] = self.get_decoder()
a__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
a__ : List[str] = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
a__ : List[Any] = processor.decode(A__ )
a__ : Union[str, Any] = decoder.decode_beams(A__ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __lowerCAmelCase ( self : Optional[int] , A__ : str ) -> Any:
'''simple docstring'''
a__ : str = self.get_feature_extractor()
a__ : List[Any] = self.get_tokenizer()
a__ : List[str] = self.get_decoder()
a__ : Any = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
a__ : str = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
a__ : str = processor.batch_decode(A__ )
else:
with get_context(A__ ).Pool() as pool:
a__ : Optional[Any] = processor.batch_decode(A__ , A__ )
a__ : Dict = list(A__ )
with get_context('''fork''' ).Pool() as p:
a__ : Any = decoder.decode_beams_batch(A__ , A__ )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A__ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(A__ , decoded_processor.logit_score )
self.assertListEqual(A__ , decoded_processor.lm_score )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
a__ : Dict = self.get_feature_extractor()
a__ : List[str] = self.get_tokenizer()
a__ : Tuple = self.get_decoder()
a__ : Any = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
a__ : Optional[Any] = self._get_dummy_logits()
a__ : Dict = 1_5
a__ : Optional[Any] = -20.0
a__ : Tuple = -4.0
a__ : str = processor.batch_decode(
A__ , beam_width=A__ , beam_prune_logp=A__ , token_min_logp=A__ , )
a__ : List[Any] = decoded_processor_out.text
a__ : int = list(A__ )
with get_context('''fork''' ).Pool() as pool:
a__ : Tuple = decoder.decode_beams_batch(
A__ , A__ , beam_width=A__ , beam_prune_logp=A__ , token_min_logp=A__ , )
a__ : List[Any] = [d[0][0] for d in decoded_decoder_out]
a__ : str = [d[0][2] for d in decoded_decoder_out]
a__ : str = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A__ , A__ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A__ )
self.assertTrue(np.array_equal(A__ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , A__ , atol=1E-3 ) )
self.assertTrue(np.array_equal(A__ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , A__ , atol=1E-3 ) )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
a__ : List[Any] = self.get_feature_extractor()
a__ : List[Any] = self.get_tokenizer()
a__ : str = self.get_decoder()
a__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
a__ : List[str] = self._get_dummy_logits()
a__ : Tuple = 2.0
a__ : Optional[Any] = 5.0
a__ : Optional[int] = -20.0
a__ : Tuple = True
a__ : int = processor.batch_decode(
A__ , alpha=A__ , beta=A__ , unk_score_offset=A__ , lm_score_boundary=A__ , )
a__ : Any = decoded_processor_out.text
a__ : Dict = list(A__ )
decoder.reset_params(
alpha=A__ , beta=A__ , unk_score_offset=A__ , lm_score_boundary=A__ , )
with get_context('''fork''' ).Pool() as pool:
a__ : Optional[int] = decoder.decode_beams_batch(
A__ , A__ , )
a__ : str = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A__ , A__ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A__ )
a__ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A__ )
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
a__ : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
a__ : str = processor.decoder.model_container[processor.decoder._model_key]
a__ : int = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
a__ : Tuple = os.listdir(A__ )
a__ : List[str] = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A__ , A__ )
def __lowerCAmelCase ( self : Any ) -> int:
'''simple docstring'''
a__ : List[Any] = snapshot_download('''hf-internal-testing/processor_with_lm''' )
a__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained(A__ )
a__ : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
a__ : Optional[int] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
a__ : List[Any] = os.listdir(A__ )
a__ : int = os.listdir(A__ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(A__ , A__ )
def __lowerCAmelCase ( self : Any ) -> Tuple:
'''simple docstring'''
a__ : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
a__ : Tuple = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
a__ : Optional[int] = floats_list((3, 1_0_0_0) )
a__ : Any = processor_wavaveca(A__ , return_tensors='''np''' )
a__ : List[Any] = processor_auto(A__ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
a__ : int = self._get_dummy_logits()
a__ : Optional[int] = processor_wavaveca.batch_decode(A__ )
a__ : List[Any] = processor_auto.batch_decode(A__ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __lowerCAmelCase ( self : List[str] ) -> Any:
'''simple docstring'''
a__ : Tuple = self.get_feature_extractor()
a__ : Optional[int] = self.get_tokenizer()
a__ : List[str] = self.get_decoder()
a__ : Any = WavaVecaProcessorWithLM(tokenizer=A__ , feature_extractor=A__ , decoder=A__ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __lowerCAmelCase ( A__ : List[str] , A__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = [d[key] for d in offsets]
return retrieved_list
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
a__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
a__ : List[str] = self._get_dummy_logits()[0]
a__ : Optional[Any] = processor.decode(A__ , output_word_offsets=A__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A__ , A__ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __lowerCAmelCase ( self : Optional[int] ) -> Any:
'''simple docstring'''
a__ : Any = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
a__ : Optional[Any] = self._get_dummy_logits()
a__ : str = processor.batch_decode(A__ , output_word_offsets=A__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A__ , A__ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A__ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __lowerCAmelCase ( self : str ) -> Optional[int]:
'''simple docstring'''
import torch
a__ : Optional[int] = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A__ )
a__ : Dict = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
a__ : str = iter(A__ )
a__ : Any = next(A__ )
a__ : Dict = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
a__ : Optional[Any] = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
a__ : Optional[Any] = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
a__ : Optional[Any] = model(A__ ).logits.cpu().numpy()
a__ : Tuple = processor.decode(logits[0] , output_word_offsets=A__ )
a__ : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
a__ : Optional[int] = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
a__ : Optional[Any] = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(A__ , '''word''' ) ) , A__ )
self.assertEqual(''' '''.join(self.get_from_offsets(A__ , '''word''' ) ) , output.text )
# output times
a__ : List[Any] = torch.tensor(self.get_from_offsets(A__ , '''start_time''' ) )
a__ : Optional[Any] = torch.tensor(self.get_from_offsets(A__ , '''end_time''' ) )
# fmt: off
a__ : List[str] = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
a__ : Union[str, Any] = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A__ , A__ , atol=0.01 ) )
self.assertTrue(torch.allclose(A__ , A__ , atol=0.01 ) )
| 688
|
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __a ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
a__ : Dict = TapasConfig.from_json_file(lowerCAmelCase__ )
# set absolute/relative position embeddings parameter
a__ : List[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
a__ : Optional[Any] = TapasForQuestionAnswering(config=lowerCAmelCase__ )
elif task == "WTQ":
# run_task_main.py hparams
a__ : List[str] = 4
a__ : Optional[int] = True
# hparam_utils.py hparams
a__ : List[Any] = 0.664694
a__ : List[Any] = 0.207951
a__ : Union[str, Any] = 0.121194
a__ : Optional[Any] = True
a__ : Optional[int] = True
a__ : List[str] = False
a__ : Union[str, Any] = 0.0352513
a__ : Any = TapasForQuestionAnswering(config=lowerCAmelCase__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
a__ : Tuple = 4
a__ : Dict = False
# hparam_utils.py hparams
a__ : str = 36.4519
a__ : str = 0.903421
a__ : Optional[Any] = 222.088
a__ : Dict = True
a__ : Dict = True
a__ : Dict = True
a__ : str = 0.763141
a__ : List[Any] = TapasForQuestionAnswering(config=lowerCAmelCase__ )
elif task == "TABFACT":
a__ : List[str] = TapasForSequenceClassification(config=lowerCAmelCase__ )
elif task == "MLM":
a__ : Tuple = TapasForMaskedLM(config=lowerCAmelCase__ )
elif task == "INTERMEDIATE_PRETRAINING":
a__ : List[str] = TapasModel(config=lowerCAmelCase__ )
else:
raise ValueError(F'Task {task} not supported.' )
print(F'Building PyTorch model from configuration: {config}' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowerCAmelCase__ )
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}' )
a__ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 )
tokenizer.save_pretrained(lowerCAmelCase__ )
print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
    help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 688
| 1
|
import random
def _partition(data, pivot) -> tuple:
    '''Three-way partition: elements less than, equal to, and greater than pivot.'''
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items, index):
    '''Return the index-th smallest element of items (0-based), or None if out of range.'''
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    less, equal, greater = _partition(items, pivot)
    m = len(less)
    count = len(equal)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(less, index)
    # must be in larger
    else:
        return quick_select(greater, index - (m + count))
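# Sanity checks for quick_select (data is hypothetical): the index counts from
# the smallest element, so index 2 of [7, 2, 9, 4, 1] is the third smallest value.
assert quick_select([7, 2, 9, 4, 1], 2) == 4
assert quick_select([7, 2, 9, 4, 1], 10) is None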
| 708
|
from torch import nn
def get_activation(act_fn) -> nn.Module:
    '''Return the torch activation module matching the given name.'''
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}" )
| 441
| 0
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
lowerCamelCase__ = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
for attribute in key.split(""".""" ):
__a = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if weight_type is not None:
__a = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).shape
else:
__a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , hf_model.config.feat_extract_norm == """group""" , )
__a = True
else:
for key, mapped_key in MAPPING.items():
__a = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key):
# special case since naming is very similar
continue
__a = True
if "*" in mapped_key:
__a = name.split(SCREAMING_SNAKE_CASE_ )[0].split(""".""" )[-2]
__a = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE_ )
if "weight_g" in name:
__a = """weight_g"""
elif "weight_v" in name:
__a = """weight_v"""
elif "bias" in name:
__a = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a = """weight"""
else:
__a = None
set_recursively(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE_ )
logger.warning(f"Unused weights: {unused_weights}" )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
__a = full_name.split("""conv_layers.""" )[-1]
__a = name.split(""".""" )
__a = int(items[0] )
__a = int(items[1] )
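    # e.g. a full_name ending in "conv_layers.0.0.weight" parses to layer_id = 0
    # and type_id = 0 (a conv weight); type_id == 2 marks the layer-norm entries.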
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__a = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__a = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
__a = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
__a = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=True ):
"""simple docstring"""
if config_path is not None:
__a = UniSpeechSatConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
__a = UniSpeechSatConfig()
__a = """"""
if is_finetuned:
__a = UniSpeechSatForCTC(SCREAMING_SNAKE_CASE_ )
else:
__a = UniSpeechSatForPreTraining(SCREAMING_SNAKE_CASE_ )
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
__a = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowerCamelCase__ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 225
|
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : str = logging.get_logger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
_SCREAMING_SNAKE_CASE = state_dict.pop(F"encoder.deit.blocks.{i}.attn.qkv.weight" )
_SCREAMING_SNAKE_CASE = in_proj_weight[
: encoder_config.hidden_size, :
]
_SCREAMING_SNAKE_CASE = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
_SCREAMING_SNAKE_CASE = in_proj_weight[
-encoder_config.hidden_size :, :
]
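# Shape sketch for the qkv split above: in_proj_weight has shape
# (3 * hidden_size, hidden_size); rows [0 : hidden_size] hold the query weights,
# [hidden_size : 2 * hidden_size] the keys, and the last hidden_size rows the values.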
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = dct.pop(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = val
def prepare_img( checkpoint_url ):
    """simple docstring"""
    if "handwritten" in checkpoint_url:
        url = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    """simple docstring"""
    encoder_config = ViTConfig(image_size=3_84 , qkv_bias=False )
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 7_68
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 10_24
        encoder_config.intermediate_size = 40_96
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 10_24
    else:
        raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings and no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = """relu"""
        decoder_config.max_position_embeddings = 10_24
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" , check_hash=True )["""model"""]
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""decoder""" ) and "output_projection" not in key:
            state_dict["""decoder.model.""" + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained("""roberta-large""" )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors="""pt""" ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 5_02_65] )
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , expected_slice , atol=1e-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving processor to {pytorch_dump_folder_path}" )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCamelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
UpperCamelCase__ : Dict = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
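# Hedged usage sketch (not in the original script): once converted, the dump
# folder can be reloaded for inference. The folder path below is an assumption;
# greedy decoding via `generate` is one reasonable way to read out text.
def _example_inference(pytorch_dump_folder_path="./trocr-base-handwritten"):
    import requests
    from PIL import Image
    from transformers import TrOCRProcessor, VisionEncoderDecoderModel
    processor = TrOCRProcessor.from_pretrained(pytorch_dump_folder_path)
    model = VisionEncoderDecoderModel.from_pretrained(pytorch_dump_folder_path)
    url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    generated_ids = model.generate(pixel_values)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]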
| 591
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : str = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class ResNetConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = 'resnet'
    layer_types = ['basic', 'bottleneck']
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class ResNetOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1E-3
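# Hedged sketch (not part of the original file): checking the stage names that
# ResNetConfig.__init__ above derives from `depths`.
def _demo_resnet_config():
    config = ResNetConfig(depths=[3, 4, 6, 3] , layer_type="bottleneck" )
    # "stem" plus one stage per entry in `depths`
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]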
| 701
|
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main():
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
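# Minimal sketch of the helper above: leftover CLI flags are paired with their
# values and the leading dashes are stripped. Flag names here are illustrative.
def _demo_parse_unknown_args():
    unknown_args = ["--num_proc", "4", "--cache_dir", "/tmp/hf"]
    parsed = parse_unknown_args(unknown_args)
    assert parsed == {"num_proc": "4", "cache_dir": "/tmp/hf"}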
| 485
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_convbert_fast'''] = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_convbert'''] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_convbert'''] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
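# Hedged sketch of the lazy-import behavior wired up above: importing the
# package is cheap, and the heavy submodule is only loaded when one of the
# names registered in _import_structure is first accessed.
def _demo_lazy_access():
    import transformers.models.convbert as convbert
    return convbert.ConvBertConfig  # first attribute access triggers the real import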
| 8
|
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')
prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('dog-bucket.png')
| 110
| 0
|
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
lowercase__ = get_logger(__name__)
class _PatchedModuleObj :
    '''simple docstring'''
    def __init__( self , module , attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('__' ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule :
    '''simple docstring'''
    _active_patches = []
    def __init__( self , obj , target , new , attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.' )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__( self ):
        *submodules , target_attr = self.target.split('.' )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module('.'.join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
        if submodules: # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
            self.original[target_attr] = globals()['''__builtins__'''][target_attr]
            setattr(self.obj , target_attr , self.new )
else:
raise RuntimeError(F'''Tried to patch attribute {target_attr} instead of a submodule.''' )
    def __exit__( self , *exc_info ):
        for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
    def start( self ):
        self.__enter__()
        self._active_patches.append(self )
    def stop( self ):
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
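# Hedged usage sketch (not part of the original module): the patcher above
# swaps an attribute such as os.path.join everywhere a target module object
# can see it, and restores it on exit. The dummy module below is illustrative.
def _demo_patch_submodule():
    import os
    import types
    dummy = types.ModuleType('dummy' )
    dummy.os = os
    def fake_join(*paths ):
        return '::'.join(paths )
    with patch_submodule(dummy , 'os.path.join' , fake_join ):
        assert dummy.os.path.join('a' , 'b' ) == 'a::b'  # patched inside the context
    assert dummy.os.path.join('a' , 'b' ) == os.path.join('a' , 'b' )  # restored on exit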
| 492
|
"""simple docstring"""
from math import pow, sqrt
def validate( *values ):
    result = len(values ) > 0 and all(value > 0.0 for value in values )
    return result
def effusion_ratio( molar_mass_1 , molar_mass_2 ):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(molar_mass_1 , molar_mass_2 )
        else ValueError('Input Error: Molar mass values must be greater than 0.' )
    )
def first_effusion_rate( effusion_rate , molar_mass_1 , molar_mass_2 ):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
def second_effusion_rate( effusion_rate , molar_mass_1 , molar_mass_2 ):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
def first_molar_mass( molar_mass , effusion_rate_1 , effusion_rate_2 ):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2 , 2 ) , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
def second_molar_mass( molar_mass , effusion_rate_1 , effusion_rate_2 ):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2 , 2 ) / molar_mass , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
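# Worked check of Graham's law (a hedged addition): a gas with molar mass 2
# (hydrogen-like) effuses four times as fast as one with molar mass 32
# (oxygen-like), since sqrt(32 / 2) = 4.
assert effusion_ratio(2 , 32 ) == 4.0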
| 492
| 1
|
'''simple docstring'''
def solution(n: int = 1_000 )-> int:
    '''simple docstring'''
    fa , fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa , fb = fb, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
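# Sanity check (hedged addition): the first Fibonacci number with three digits
# is F(12) = 144, so solution(3) should return 12.
assert solution(3) == 12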
| 24
|
import math
def is_prime(number ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth = 10_001 ):
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
    if nth <= 0:
        raise ValueError("""Parameter nth must be greater than or equal to one.""" )
    primes = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
        num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
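# Sanity check (hedged addition): the sixth prime is 13; the default argument
# answers Project Euler problem 7 (the 10,001st prime).
assert solution(6) == 13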
| 99
| 0
|
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , do_resize = True , size = None , size_divisor = 32 , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , do_center_crop = True , image_mean = [0.48145466, 0.4578275, 0.40821073] , image_std = [0.26862954, 0.26130258, 0.27577711] , do_pad = True , batch_size=7 , min_resolution=30 , max_resolution=400 , num_channels=3 , ):
        '''simple docstring'''
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {'shortest_edge': 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values( self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            size = self.size['shortest_edge']
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            scale = size / min(w , h )
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            max_size = int((1_333 / 800) * size )
            if max(newh , neww ) > max_size:
                scale = max_size / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            newh , neww = int(newh + 0.5 ), int(neww + 0.5 )
            expected_height , expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
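# Hedged worked example of the shortest-edge resize rule above (the input size
# is illustrative): a 400 x 600 image with size = 288 scales by 288 / 400 = 0.72
# to (288, 432), stays under the max size int(1333 / 800 * 288) = 479, and both
# sides are then floored to multiples of size_divisor = 32, giving (288, 416).
def _demo_expected_size():
    import numpy as np
    tester = BridgeTowerImageProcessingTester(None )
    fake_image = np.zeros((3, 400, 600) )  # channels-first, height 400, width 600
    assert tester.get_expected_values([fake_image] ) == (288, 416)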
@require_torch
@require_vision
class BridgeTowerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
lowerCAmelCase_ : Tuple = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = BridgeTowerImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , 'image_mean' ) )
self.assertTrue(hasattr(snake_case , 'image_std' ) )
self.assertTrue(hasattr(snake_case , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case , 'do_resize' ) )
self.assertTrue(hasattr(snake_case , 'size' ) )
self.assertTrue(hasattr(snake_case , 'size_divisor' ) )
def A_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
pass
def A_ ( self : Tuple ) -> Dict:
'''simple docstring'''
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A = image_processing(snake_case , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(snake_case , batched=snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A = image_processing(snake_case , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(snake_case , batched=snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A = image_processing(snake_case , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(snake_case , batched=snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 714
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig( PretrainedConfig ):
    model_type = """decision_transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4_096 , action_tanh=True , vocab_size=1 , n_positions=1_024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        '''simple docstring'''
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
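# Hedged sketch (not part of the original file): the attribute_map above lets
# the config expose GPT-2-style attribute names transparently.
def _demo_decision_transformer_config():
    config = DecisionTransformerConfig(state_dim=17 , act_dim=4 , hidden_size=128 )
    assert config.max_position_embeddings == config.n_positions  # redirected via attribute_map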
| 109
| 0
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester :
'''simple docstring'''
def __init__( self , a_ , a_=1_3 , a_=7 , a_=False , a_=True , a_=False , a_=False , a_=1_9 , a_=3_2 , a_=5 , a_=4 , a_=3_7 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_1_2 , a_=1_6 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> Any:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = EsmConfig(
vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=a_ , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def snake_case_ ( self , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = EsmForProteinFolding(config=a_ ).float()
model.to(a_ )
model.eval()
UpperCAmelCase = model(a_ , attention_mask=a_ )
UpperCAmelCase = model(a_ )
UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
        config, input_ids, input_mask, sequence_labels, token_labels, choice_labels = self.prepare_config_and_inputs()
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : Dict = (EsmForProteinFolding,) if is_torch_available() else ()
__lowerCAmelCase : int = ()
__lowerCAmelCase : Optional[Any] = {} if is_torch_available() else {}
__lowerCAmelCase : Optional[Any] = False
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = EsmFoldModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=3_7 )
def snake_case_ ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
@unittest.skip('Does not support attention outputs' )
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip
def snake_case_ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip('Esm does not support embedding resizing' )
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('Esm does not support embedding resizing' )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('ESMFold only has one output format.' )
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def snake_case_ ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip('ESMFold does not support input chunking.' )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> Any:
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
'''simple docstring'''
@slow
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
        model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
        position_outputs = model(input_ids )['positions']
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.float64 )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1E-4 ) )
| 447
|
'''simple docstring'''
import qiskit
def single_qubit_measure( qubits: int , classical_bits: int ):
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 447
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = """▁"""
UpperCAmelCase = {"""vocab_file""": """spiece.model"""}
UpperCAmelCase = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
UpperCAmelCase = {
"""google/pegasus-xsum""": 512,
}
UpperCAmelCase = logging.get_logger(__name__)
class PegasusTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase="<pad>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<mask_2>" , _UpperCAmelCase="<mask_1>" , _UpperCAmelCase=None , _UpperCAmelCase=1_03 , _UpperCAmelCase = None , **_UpperCAmelCase , ):
snake_case_ = offset
if additional_special_tokens is not None:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError(
F'''additional_special_tokens should be of type {type(_UpperCAmelCase )}, but is'''
F''' {type(_UpperCAmelCase )}''' )
snake_case_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(_UpperCAmelCase ) , self.offset - 1 )
]
if len(set(_UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
snake_case_ = additional_special_tokens_extended
else:
snake_case_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset )]
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token_sent=_UpperCAmelCase , offset=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
snake_case_ = mask_token_sent
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
# add special tokens to encoder dict
snake_case_ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
snake_case_ = {v: k for k, v in self.encoder.items()}
@property
def UpperCamelCase__ ( self ):
return len(self.sp_model ) + self.offset
def UpperCamelCase__ ( self ):
snake_case_ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self , _UpperCAmelCase ):
snake_case_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
snake_case_ = self.sp_model.piece_to_id(_UpperCAmelCase )
return sp_id + self.offset
def UpperCamelCase__ ( self , _UpperCAmelCase ):
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
snake_case_ = self.sp_model.IdToPiece(index - self.offset )
return token
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = []
snake_case_ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_UpperCAmelCase ) + token
snake_case_ = []
else:
current_sub_tokens.append(_UpperCAmelCase )
out_string += self.sp_model.decode(_UpperCAmelCase )
return out_string.strip()
def UpperCamelCase__ ( self , _UpperCAmelCase=False ):
return 1
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return self._special_token_mask(_UpperCAmelCase )
elif token_ids_a is None:
return self._special_token_mask(_UpperCAmelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , '''wb''' ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
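# Hedged sketch of the offset scheme above: ordinary sentencepiece ids are
# shifted by `offset` so the low ids stay reserved for pad/eos/mask tokens.
# The checkpoint name and the piece id chosen are illustrative assumptions.
def _demo_offset():
    from transformers import PegasusTokenizer
    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    piece = tokenizer.sp_model.id_to_piece(10)  # an ordinary sentencepiece id
    assert tokenizer.convert_tokens_to_ids(piece) == 10 + tokenizer.offset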
| 531
|
def and_gate(input_1 , input_2 )-> int:
    """simple docstring"""
    return int((input_1, input_2).count(0 ) == 0 )
def test_and_gate()-> None:
    """simple docstring"""
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 531
| 1
|
"""simple docstring"""
from __future__ import annotations
__SCREAMING_SNAKE_CASE : int = '''Muhammad Umer Farooq'''
__SCREAMING_SNAKE_CASE : Optional[Any] = '''MIT'''
__SCREAMING_SNAKE_CASE : Any = '''1.0.0'''
__SCREAMING_SNAKE_CASE : Any = '''Muhammad Umer Farooq'''
__SCREAMING_SNAKE_CASE : List[Any] = '''contact@muhammadumerfarooq.me'''
__SCREAMING_SNAKE_CASE : List[str] = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser( HTMLParser ):
    '''simple docstring'''
    def __init__( self , domain ):
        super().__init__()
        self.urls = []
        self.domain = domain
    def handle_starttag( self , tag , attrs ):
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined and is not empty or "#", process it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name( url: str ) -> str:
    return ".".join(get_sub_domain_name(url ).split('''.''' )[-2:] )
def get_sub_domain_name( url: str ) -> str:
    return parse.urlparse(url ).netloc
def emails_from_url( url: str = "https://github.com" ) -> list[str]:
    domain = get_domain_name(url )
    # Initialize the parser
    parser = Parser(domain )
    try:
        # Open URL
        r = requests.get(url )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = emails_from_url('''https://github.com''')
print(F"""{len(emails)} emails found:""")
print('''\n'''.join(sorted(emails)))
| 661
|
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=3 , lowerCamelCase__=3_2 , lowerCamelCase__=3 , lowerCamelCase__=1_0 , lowerCamelCase__=[8, 1_6, 3_2, 6_4] , lowerCamelCase__=[1, 1, 2, 1] , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu" , lowerCamelCase__=3 , lowerCamelCase__=None , lowerCamelCase__=["stage2", "stage3", "stage4"] , lowerCamelCase__=[2, 3, 4] , lowerCamelCase__=1 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = num_channels
_lowerCamelCase = embeddings_size
_lowerCamelCase = hidden_sizes
_lowerCamelCase = depths
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_act
_lowerCamelCase = num_labels
_lowerCamelCase = scope
_lowerCamelCase = len(lowerCamelCase__ )
_lowerCamelCase = out_features
_lowerCamelCase = out_indices
_lowerCamelCase = num_groups
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = BitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = BitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = BitBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCamelCase = None
_lowerCamelCase = BitBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class BitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Dict = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowercase__ : Any = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Union[str, Any] = False
lowercase__ : List[Any] = False
lowercase__ : Any = False
lowercase__ : List[str] = False
lowercase__ : Any = False
def snake_case__ ( self ):
_lowerCamelCase = BitModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def snake_case__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(config=lowerCamelCase__ )
for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def snake_case__ ( self ):
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCamelCase = layer_type
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def snake_case__ ( self ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = BitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class BitModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def snake_case__ ( self ):
_lowerCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
_lowerCamelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
@require_torch
class BitBackboneTest( BackboneTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (BitBackbone,) if is_torch_available() else ()
lowercase__ : Tuple = BitConfig
lowercase__ : Any = False
def snake_case__ ( self ):
_lowerCamelCase = BitModelTester(self )
| 661
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
a_ = logging.get_logger(__name__)
a_ = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class BloomConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """bloom"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_hidden_layers""": """n_layer""",
        """num_attention_heads""": """n_head""",
    }
    def __init__(self , vocab_size=25_08_80 , hidden_size=64 , n_layer=2 , n_head=8 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("""n_embed""" , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Union[str, Any] = version.parse("""1.12""")
def __init__(self , lowercase__ , lowercase__ = "default" , lowercase__ = None , lowercase__ = False , ):
super().__init__(lowercase__ , task=lowercase__ , patching_specs=lowercase__ , use_past=lowercase__ )
if not getattr(self._config , """pad_token_id""" , lowercase__ ):
# TODO: how to do that better?
snake_case_ : Optional[Any] = 0
@property
def __UpperCamelCase (self ):
snake_case_ : Any = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(lowercase__ , direction="""inputs""" , inverted_values_shape=lowercase__ )
snake_case_ : Tuple = {0: """batch""", 1: """past_sequence + sequence"""}
else:
snake_case_ : int = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def __UpperCamelCase (self ):
return self._config.n_layer
@property
def __UpperCamelCase (self ):
return self._config.n_head
@property
def __UpperCamelCase (self ):
return 1e-3
def __UpperCamelCase (self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , ):
snake_case_ : Any = super(lowercase__ , self ).generate_dummy_inputs(
lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__ )
# We need to order the input in the way they appears in the forward()
snake_case_ : Any = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case_ , snake_case_ : Dict = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
snake_case_ : str = seqlen + 2
snake_case_ : List[Any] = self._config.hidden_size // self.num_attention_heads
snake_case_ : int = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
snake_case_ : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
snake_case_ : List[Any] = [
(torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(self.num_layers )
]
snake_case_ : int = common_inputs["""attention_mask"""]
if self.use_past:
snake_case_ : List[str] = ordered_inputs["""attention_mask"""].dtype
snake_case_ : int = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(lowercase__ , lowercase__ , dtype=lowercase__ )] , dim=1 )
return ordered_inputs
@property
def __UpperCamelCase (self ):
return 13
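# A minimal usage sketch for BloomOnnxConfig above (illustrative only; assumes
# network access to the Hub checkpoint):
# from transformers import AutoTokenizer, TensorType
# onnx_config = BloomOnnxConfig(BloomConfig(), task="default", use_past=True)
# tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
# dummy = onnx_config.generate_dummy_inputs(
#     tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
# )
# print(list(dummy.keys()))  # ['input_ids', 'past_key_values', 'attention_mask']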
"""simple docstring"""
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        self.add(index, value - self.get(index))

    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
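# A minimal usage sketch of the FenwickTree class above (example values are
# illustrative, not part of the original module). prefix(i) returns the sum of
# arr[0:i]; query(l, r) the sum of arr[l:r]; add(i, v) adds v at index i.
def _fenwick_tree_demo() -> None:
    tree = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert tree.prefix(3) == 1 + 2 + 3
    assert tree.query(1, 4) == 2 + 3 + 4
    tree.add(2, 10)  # arr[2] += 10
    assert tree.get(2) == 13
    assert tree.query(1, 4) == 2 + 13 + 4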
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a Flax checkpoint file into a PyTorch model."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(pt_model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )
    pt_model.base_model_prefix = ""
    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )
        flax_key = ".".join(flax_key_tuple_array)
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    return pt_model
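# Typical call pattern for the two loaders above (a sketch; the checkpoint
# filename is illustrative):
# pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")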
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , **__SCREAMING_SNAKE_CASE , ):
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = vocab_file
snake_case__ : Optional[Any] = merges_file
snake_case__ : Dict = {}
snake_case__ : Any = 0
snake_case__ : int = 1
snake_case__ : int = 2
snake_case__ : List[Any] = 3
self.add_from_file(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
with open(__SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as merges_handle:
snake_case__ : Any = merges_handle.read().split("""\n""" )[:-1]
snake_case__ : int = [tuple(merge.split()[:-1] ) for merge in merges]
snake_case__ : List[str] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : List[str] = {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : int = [self.cls_token_id]
snake_case__ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
snake_case__ : Any = [self.sep_token_id]
snake_case__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCamelCase ( self ):
return len(self.encoder )
def __UpperCamelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if token in self.cache:
return self.cache[token]
snake_case__ : List[Any] = tuple(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
snake_case__ : Any = get_pairs(__SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
snake_case__ : Optional[Any] = min(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__ , snake_case__ : Tuple = bigram
snake_case__ : Dict = []
snake_case__ : str = 0
while i < len(__SCREAMING_SNAKE_CASE ):
try:
snake_case__ : Tuple = word.index(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : List[str] = j
if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : Dict = tuple(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = new_word
if len(__SCREAMING_SNAKE_CASE ) == 1:
break
else:
snake_case__ : Union[str, Any] = get_pairs(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = """@@ """.join(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = word[:-4]
snake_case__ : Dict = word
return word
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = []
snake_case__ : Any = re.findall(R"""\S+\n?""" , __SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(__SCREAMING_SNAKE_CASE ).split(""" """ ) ) )
return split_tokens
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = """ """.join(__SCREAMING_SNAKE_CASE ).replace("""@@ """ , """""" ).strip()
return out_string
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case__ : Optional[int] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : Any = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
if os.path.abspath(self.merges_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.merges_file , __SCREAMING_SNAKE_CASE )
return out_vocab_file, out_merge_file
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
try:
with open(__SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
snake_case__ : Tuple = f.readlines()
for lineTmp in lines:
snake_case__ : Any = lineTmp.strip()
snake_case__ : Optional[Any] = line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
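# Usage sketch (illustrative; fetches the vocab/merges files referenced above
# through the inherited from_pretrained):
# tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
# print(tokenizer.tokenize("Xin chào"))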
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """simple docstring"""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """simple docstring"""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: str = "<unk>",
        eos_token: str = "</s>",
        pad_token: str = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(self, files, vocab_size=8000, show_progress=True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(self, iterator, vocab_size=8000, show_progress=True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
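# Minimal usage sketch (the corpus path is hypothetical; illustrative only):
# tokenizer = SentencePieceUnigramTokenizer()
# tokenizer.train(files="corpus.txt", vocab_size=8000)
# print(tokenizer.encode("hello world").tokens)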
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """simple docstring"""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
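# For inputs (1, 1, 1) the adder computes sum = 1 and carry = 1, so the counts
# returned by the simulator are concentrated on the bitstring '11', e.g.
# {'11': 1000} for 1000 shots (assuming qiskit and the Aer simulator are installed).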
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
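# Worked example of the half-integer recursion above:
# gamma(2.5) = 1.5 * gamma(1.5) = 1.5 * 0.5 * gamma(0.5) = 0.75 * sqrt(pi) ≈ 1.3293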
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values"
        )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori"
        )
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"Rectangle: {area_rectangle(1_0, 2_0) = }")
print(F"Square: {area_square(1_0) = }")
print(F"Triangle: {area_triangle(1_0, 1_0) = }")
print(F"Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }")
print(F"Parallelogram: {area_parallelogram(1_0, 2_0) = }")
print(F"Rhombus: {area_rhombus(1_0, 2_0) = }")
print(F"Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }")
print(F"Circle: {area_circle(2_0) = }")
print(F"Ellipse: {area_ellipse(1_0, 2_0) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(F"Cube: {surface_area_cube(2_0) = }")
print(F"Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }")
print(F"Sphere: {surface_area_sphere(2_0) = }")
print(F"Hemisphere: {surface_area_hemisphere(2_0) = }")
print(F"Cone: {surface_area_cone(1_0, 2_0) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }")
print(F"Cylinder: {surface_area_cylinder(1_0, 2_0) = }")
print(F"Torus: {surface_area_torus(2_0, 1_0) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 1_0) = }")
print(F"Square: {area_reg_polygon(4, 1_0) = }")
print(F"Reqular Pentagon: {area_reg_polygon(5, 1_0) = }")
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    """simple docstring"""
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664_694
        config.cell_selection_preference = 0.207_951
        config.huber_loss_delta = 0.121_194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_352_513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4_519
        config.cell_selection_preference = 0.903_421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763_141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
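# Example invocation (hypothetical paths; illustrative only):
# python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#     --task WTQ \
#     --reset_position_index_per_cell \
#     --tf_checkpoint_path model.ckpt-0 \
#     --tapas_config_file tapas_config.json \
#     --pytorch_dump_path ./tapas_wtq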
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , A_ , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , **A_ , ) -> Dict:
"""simple docstring"""
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , **A_ , )
_lowercase: List[Any] = vocab_file
_lowercase: Union[str, Any] = merges_file
_lowercase: int = {}
_lowercase: Optional[int] = 0
_lowercase: Optional[Any] = 1
_lowercase: List[str] = 2
_lowercase: List[Any] = 3
self.add_from_file(A_ )
_lowercase: Tuple = {v: k for k, v in self.encoder.items()}
with open(A_ , encoding='''utf-8''' ) as merges_handle:
_lowercase: int = merges_handle.read().split('''\n''' )[:-1]
_lowercase: Optional[int] = [tuple(merge.split()[:-1] ) for merge in merges]
_lowercase: List[str] = dict(zip(A_ , range(len(A_ ) ) ) )
_lowercase: Union[str, Any] = {}
def lowercase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowercase: Optional[Any] = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Retrieve a mask marking special tokens (1) versus sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """This model does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string with the BPE merges."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 272
| 0
|
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
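# Sanity check (not part of the original snippet): the expected count of
# first-of-month Sundays in 1901-2000 is 171.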
| 393
|
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code


class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))


class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()


class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only used when there is no evaluation strategy
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
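# Minimal wiring sketch (assumed, not from this file): transformers normally picks
# this callback up automatically when it detects a notebook, but it can also be
# passed explicitly, e.g.:
#   trainer = Trainer(model=model, args=training_args, callbacks=[NotebookProgressCallback()])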
| 393
| 1
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
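# Hypothetical usage sketch (names are illustrative, not from this file):
#   processor = AltCLIPProcessor(image_processor=image_processor, tokenizer=tokenizer)
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> a BatchEncoding holding input_ids, attention_mask and pixel_values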
| 703
|
"""simple docstring"""
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets = [[] for _ in range(bucket_count)]

    # Scatter each element into the bucket matching its offset from the minimum.
    for i in my_list:
        buckets[int(i - min_value)].append(i)

    # Sort each bucket, then concatenate them in order.
    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
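# Note: with k = max - min + 1 integer buckets, scattering costs O(n + k) and each
# bucket is sorted with Python's built-in Timsort, so roughly uniform inputs keep
# the per-bucket sorting work small.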
| 14
| 0
|
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited, add current to sort
    sort.append(current)
    # if all vertices haven't been visited, select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
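# For the sample graph above this post-order traversal prints
# ['c', 'd', 'e', 'b', 'a']: dependencies are emitted before their parents,
# so reversing the list gives a conventional topological order.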
| 579
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        # Copy the dummy sentence to each split, field by field.
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 187
| 0
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(computed_tokens, TARGET_TOKENS)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which place no
        # constraint on the sequence length. The parent-class test would fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 704
|
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph = {i: [] for i in range(vertices_number)}

    # if probability is greater than or equal to 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is less than or equal to 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i, too
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
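# Illustrative check: with probability >= 1 every edge is kept, so
# random_graph(4, 1) == {0: [1, 2, 3], 1: [0, 2, 3], 2: [0, 1, 3], 3: [0, 1, 2]}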
| 670
| 0
|
'''simple docstring'''
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
| 168
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 168
| 1
|
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
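# The pattern accepts, e.g., "0712345678" and "+94712345678": an optional
# 0/94/+94/0094 prefix, a 7x operator code, an optional single separator
# (dash or space), then seven digits.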
| 714
|
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
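# Pigeonhole sort runs in O(n + range) time and memory; it sorts in place,
# which is why main() prints the mutated input list.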
| 416
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
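# Note: at runtime (outside TYPE_CHECKING) the module replaces itself with a
# _LazyModule, so the heavy torch/vision imports above only execute when one of
# the exported names is first accessed.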
| 659
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 659
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")

    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
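# Hypothetical invocation (script name assumed; the checkpoint file name must be
# one of ACCEPTABLE_CHECKPOINTS):
#   python convert_visual_bert_checkpoint.py vqa_pre_trained.th ./visualbert-vqa-pre-trained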
| 717
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 456
| 0
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma_function(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
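# Quick sanity check (not part of the original doctests): gamma_function(5)
# should be close to 4! = 24, since Gamma(n) = (n - 1)! for positive integers.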
| 120
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
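# Minimal usage sketch (assumes IIRFilter.process(sample) from
# audio_filters.iir_filter; the sample stream here is hypothetical):
#   filt = make_lowpass(1_000, 48_000)
#   filtered = [filt.process(sample) for sample in samples]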
| 120
| 1
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 597
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and inference."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments pertaining to the self-training loop."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Maximum number of self-training iterations."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )


def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)


def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-train a pre-trained model on a downstream task."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)

    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False

    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }

        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''', __lowerCamelCase )
logger.info('''Best evaluation result: %s = %f''', args.eval_metric, __lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowerCamelCase, F'''eval_results_iter-{iteration}.json''' ), os.path.join(__lowerCamelCase, '''eval_results_best-iteration.json''' ), )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''', args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''', args.eval_metric, __lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowerCamelCase, F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ), os.path.join(__lowerCamelCase, '''eval_results_best-iteration.json''' ), )
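# Minimal invocation sketch (illustrative only): the entry-point name `selftrain`
# and the file names below are assumptions, not taken from this script.
#
# selftrain(
#     model_name_or_path="bert-base-uncased",
#     train_file="train.csv",
#     infer_file="infer.csv",
#     output_dir="output",
#     eval_file="eval.csv",
#     evaluation_strategy="steps",
#     max_selftrain_iterations=2,
# )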
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of products of primes, one product per unique prime partition of
    number_to_partition, e.g. (7+3) <-> 7*3 = 21, (3+3+2+2) <-> 3*3*2*2 = 36.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """
    Return the smallest integer that can be written as the sum of primes in more
    than number_unique_partitions ways.
    """
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
import torch


def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
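# A small usage sketch (values are arbitrary, for illustration only): with the
# defaults above, feature_size = input_size * len(lags_sequence) + _number_of_features.
# config = TimeSeriesTransformerConfig(prediction_length=24, num_time_features=2)
# print(config.feature_size)  # 1 * 7 + (0 + 0 + 2 + 0 + 2) = 11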
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduces a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk. Use in place of `torch.save()`."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Add each keyword argument to `os.environ` and remove it again on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a human-readable name for an object, class, or function."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None):
    """Check if a port is in use on `localhost`."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
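# Usage sketch (illustrative only):
# with patch_environment(master_port="29501"):
#     assert os.environ["MASTER_PORT"] == "29501"
# assert merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}}) == {"a": {"c": 2, "b": 1}}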
from math import asin, atan, cos, radians, sin, sqrt, tan
# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
# Distance in metres(m)
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate great-circle distance between two points in a sphere,
    given longitudes and latitudes: https://en.wikipedia.org/wiki/Haversine_formula
    """
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
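# Illustrative call (coordinates are approximate values for San Francisco and
# New York City, used only as an example; the result is roughly 4.1e6 metres):
# haversine_distance(37.774856, -122.424227, 40.713019, -74.012647)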
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
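# Example invocation (the checkpoint and config paths below are placeholders,
# not taken from the original file):
# python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert_base/model.ckpt-best \
#     --albert_config_file ./albert_base/albert_config.json \
#     --pytorch_dump_path ./albert_base/pytorch_model.bin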
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
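# Quick sanity sketch (illustrative only): with the default conv_stride above,
# each output logit spans 5 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 = 320
# input samples.
# assert SEWConfig().inputs_to_logits_ratio == 320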
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ctrl': 256,
}
CONTROL_CODES = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer, based on Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
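# Illustrative example of the BPE helper above (not part of the original module):
# get_pairs(("h", "e", "l", "l", "o</w>")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}.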
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
# Note: the original class name was lost in obfuscation; the defaults below match
# the MobileNetV1 image processor, so that name is used as a best-effort reconstruction.
class MobileNetV1ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
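# Usage sketch (illustrative; remember that the class name above is itself a
# reconstruction, so treat it as an assumption):
# import numpy as np
# processor = MobileNetV1ImageProcessor()
# batch = processor.preprocess(np.zeros((300, 400, 3), dtype=np.uint8), return_tensors="np")
# print(batch["pixel_values"].shape)  # (1, 3, 224, 224)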
"""simple docstring"""
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
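# Example (hypothetical input, for illustration only): given a results file
# containing {"benchmarks/bench_a.json": {"time": {"new": 1.0, "old": 2.0, "diff": -1.0}}},
# the script writes a collapsible markdown section with one table per benchmark.
# format_json_to_md("results.json", "results.md")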
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowerCamelCase : Optional[int] = 16
__lowerCamelCase : Dict = 32
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
return int(x / 2**20 )
class lowerCAmelCase__ :
def __enter__( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowerCamelCase_ : str = torch.cuda.memory_allocated()
return self
def __exit__( self : Any , *UpperCamelCase_ : Dict ) -> List[Any]:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
lowerCamelCase_ : Dict = torch.cuda.memory_allocated()
lowerCamelCase_ : List[str] = torch.cuda.max_memory_allocated()
lowerCamelCase_ : Union[str, Any] = bamb(self.end - self.begin )
lowerCamelCase_ : Optional[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def __snake_case (__UpperCAmelCase , __UpperCAmelCase = 16 , __UpperCAmelCase = "bert-base-cased" , __UpperCAmelCase = 320 , __UpperCAmelCase = 160 , ):
"""simple docstring"""
lowerCamelCase_ : int = AutoTokenizer.from_pretrained(__UpperCAmelCase )
lowerCamelCase_ : Union[str, Any] = load_dataset(
'''glue''' , '''mrpc''' , split={'''train''': F"""train[:{n_train}]""", '''validation''': F"""validation[:{n_val}]"""} )
def tokenize_function(__UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase_ : str = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=__UpperCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ : Tuple = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCAmelCase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(__UpperCAmelCase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowerCamelCase_ : Optional[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase )
lowerCamelCase_ : Dict = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase )
return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
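# Hedged standalone sketch of the manual gradient-accumulation pattern used in the
# training loop above, reduced to a toy model (all names here are illustrative, not
# from the script). Dividing each micro-batch loss by the accumulation factor keeps
# the effective gradient equal to that of one large batch.
import torch

_toy_model = torch.nn.Linear(4, 1)
_toy_optimizer = torch.optim.AdamW(_toy_model.parameters(), lr=1e-3)
_accumulation_steps = 4
for _step in range(8):
    _x, _y = torch.randn(2, 4), torch.randn(2, 1)
    _loss = torch.nn.functional.mse_loss(_toy_model(_x), _y) / _accumulation_steps
    _loss.backward()  # gradients accumulate in .grad across micro-batches
    if (_step + 1) % _accumulation_steps == 0:
        _toy_optimizer.step()
        _toy_optimizer.zero_grad()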
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 501
| 1
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 2nd batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 2nd batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
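# Hedged standalone sketch of what the temperature warper computes: softmax of the
# logits divided by the temperature. T < 1 sharpens the distribution, T > 1 flattens
# it; the values below are illustrative only.
import jax
import jax.numpy as jnp

_logits = jnp.array([2.0, 1.0, 0.5])
print(jax.nn.softmax(_logits / 0.5))  # sharper than the T=1.0 softmax
print(jax.nn.softmax(_logits / 1.3))  # flatter than the T=1.0 softmax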
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
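# Hedged numpy sketch of top-k filtering as exercised above: keep the k largest
# logits and set the rest to -inf so they get zero probability after softmax.
import numpy as np

def _top_k_filter(logits: np.ndarray, k: int) -> np.ndarray:
    out = np.full_like(logits, -np.inf)
    top = np.argsort(logits)[-k:]  # indices of the k largest logits
    out[top] = logits[top]
    return out

print(_top_k_filter(np.arange(10.0), k=3))  # only indices 7, 8, 9 survive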
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)

        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
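# Hedged numpy sketch of nucleus (top-p) filtering as exercised above: sort
# probabilities in descending order, keep the smallest prefix whose mass reaches p,
# and zero out the rest.
import numpy as np

def _top_p_filter(probs: np.ndarray, p: float) -> np.ndarray:
    order = np.argsort(probs)[::-1]  # descending by probability
    cutoff = np.searchsorted(np.cumsum(probs[order]), p) + 1
    keep = order[:cutoff]
    out = np.zeros_like(probs)
    out[keep] = probs[keep]
    return out

print(_top_p_filter(np.array([0.5, 0.3, 0.1, 0.1]), p=0.75))  # -> [0.5, 0.3, 0.0, 0.0]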
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 721
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
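# Illustrative toy example (not from the original file) of the slicing done above:
# the profile JSON is embedded in a <script> tag as `window._sharedData = {...};`,
# so we cut from the first '{"config"' up to the trailing ';' (hence the [:-1]).
_toy = 'window._sharedData = {"config": {"user": 1}};'
print(_toy[_toy.find('{"config"') : -1])  # -> {"config": {"user": 1}}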
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self ):
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
instagram_user = InstagramUser("github")
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 664
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
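# Hedged usage sketch (not part of the test file): scoring a passage against a
# question with the two DPR encoders via a dot product, the way DPR retrieval ranks
# documents. Checkpoint names are the public facebook/dpr-* ones; everything else is
# illustrative, and loading may require `from_pt=True` if only PyTorch weights exist.
import tensorflow as tf
from transformers import (
    DPRContextEncoderTokenizer,
    DPRQuestionEncoderTokenizer,
    TFDPRContextEncoder,
    TFDPRQuestionEncoder,
)

q_tok = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
c_tok = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
q_enc = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
c_enc = TFDPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")

q_emb = q_enc(**q_tok("who wrote hamlet?", return_tensors="tf")).pooler_output
c_emb = c_enc(**c_tok("Hamlet is a tragedy by William Shakespeare.", return_tensors="tf")).pooler_output
score = tf.reduce_sum(q_emb * c_emb, axis=-1)  # higher dot product = more relevant
print(float(score[0]))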
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.0_3_2_3_6_2_5_3,
0.1_2_7_5_3_3_3_5,
0.1_6_8_1_8_5_0_9,
0.0_0_2_7_9_7_8_6,
0.3_8_9_6_9_3_3,
0.2_4_2_6_4_9_4_5,
0.2_1_7_8_9_7_1,
-0.0_2_3_3_5_2_2_7,
-0.0_8_4_8_1_9_5_9,
-0.1_4_3_2_4_1_1_7,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 103
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
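# Hedged usage sketch (not part of the test file): running self-attention guidance
# outside the test harness. `sag_scale` > 0 enables SAG on top of the usual
# classifier-free guidance; the prompt and output file name are illustrative.
import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
image = pipe(
    "a photo of an astronaut riding a horse",
    guidance_scale=7.5,
    sag_scale=0.75,
    num_inference_steps=20,
).images[0]
image.save("sag_example.png")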
| 88
| 0
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPT2Tokenizer,
GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = 0
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowerCamelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , (GPT2Tokenizer, GPT2TokenizerFast) )
self.assertGreater(len(lowerCamelCase_ ) , 0 )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
# Check that tokenizer_type ≠ model_type
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowerCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ , tokenizer_type='''bert''' , use_fast=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowerCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowerCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ , tokenizer_type='''gpt2''' , use_fast=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
@require_tokenizers
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowerCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ , tokenizer_type='''bert''' )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowerCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowerCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ , tokenizer_type='''gpt2''' )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
with pytest.raises(lowerCamelCase_ ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowerCAmelCase__ = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(lowerCamelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowerCamelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , lowerCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
@require_tokenizers
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowerCamelCase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
lowerCAmelCase__ = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowerCAmelCase__ = TOKENIZER_MAPPING.values()
lowerCAmelCase__ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowerCamelCase_ )
@require_tokenizers
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase_ ) , lowerCamelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , lowerCamelCase_ )
@require_tokenizers
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=lowerCamelCase_ )
lowerCAmelCase__ = '''Hello, world. How are you?'''
lowerCAmelCase__ = tokenizer.tokenize(lowerCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.tokenize(lowerCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
self.assertEqual(tokenizer.vocab_size , 3_00_00 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Check we can load the tokenizer config of an online model.
lowerCAmelCase__ = get_tokenizer_config('''bert-base-cased''' )
lowerCAmelCase__ = config.pop('''_commit_hash''' , lowerCamelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowerCamelCase_ , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowerCAmelCase__ = get_tokenizer_config(lowerCamelCase_ )
self.assertDictEqual(lowerCamelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = get_tokenizer_config(lowerCamelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
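# Hedged standalone sketch of the helper exercised above: reading a hub tokenizer
# config without instantiating the tokenizer (import path taken from this file's
# own imports; the checkpoint name matches the test above).
from transformers.models.auto.tokenization_auto import get_tokenizer_config

_cfg = get_tokenizer_config("bert-base-cased")
print(_cfg.get("do_lower_case"))  # False for the cased checkpoint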
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
try:
AutoConfig.register('''custom''' , lowerCamelCase_ )
AutoTokenizer.register(lowerCamelCase_ , slow_tokenizer_class=lowerCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
AutoTokenizer.register(lowerCamelCase_ , slow_tokenizer_class=lowerCamelCase_ )
lowerCAmelCase__ = CustomTokenizer.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
try:
AutoConfig.register('''custom''' , lowerCamelCase_ )
# Can register in two steps
AutoTokenizer.register(lowerCamelCase_ , slow_tokenizer_class=lowerCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowerCamelCase_ , fast_tokenizer_class=lowerCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowerCamelCase_ , slow_tokenizer_class=lowerCamelCase_ , fast_tokenizer_class=lowerCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
AutoTokenizer.register(lowerCamelCase_ , fast_tokenizer_class=lowerCamelCase_ )
# We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = BertTokenizerFast.from_pretrained(lowerCamelCase_ )
bert_tokenizer.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = CustomTokenizerFast.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ , use_fast=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __SCREAMING_SNAKE_CASE ( self ) -> str:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase_ )
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase_ , use_fast=lowerCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ , use_fast=lowerCamelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : List[str] = False
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Any = NewTokenizer
lowercase__ : Optional[int] = False
try:
AutoConfig.register('''custom''' , lowerCamelCase_ )
AutoTokenizer.register(lowerCamelCase_ , slow_tokenizer_class=lowerCamelCase_ )
AutoTokenizer.register(lowerCamelCase_ , fast_tokenizer_class=lowerCamelCase_ )
# If remote code is not set, the default is to use local
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=lowerCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase_ , use_fast=lowerCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowerCamelCase_ , use_fast=lowerCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=lowerCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=lowerCamelCase_ , use_fast=lowerCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
with self.assertRaisesRegex(
lowerCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''bert-base''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
with self.assertRaisesRegex(
lowerCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowerCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase_ , revision='''aaaaaa''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# Make sure we have cached the tokenizer.
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 98
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__UpperCAmelCase = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None
    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0
    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor
    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
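# Hedged standalone sketch of the gather-then-scatter pattern implemented above:
# rank 0 gathers the per-rank query tensors, performs the (stand-in) retrieval, and
# scatters one result chunk back to each rank. All names are illustrative; it
# assumes torch.distributed is already initialized with a gloo-backed group.
import torch
import torch.distributed as dist

def gather_then_scatter(query: torch.Tensor, group) -> torch.Tensor:
    world_size = dist.get_world_size(group=group)
    if dist.get_rank(group=group) == 0:
        buckets = [torch.empty_like(query) for _ in range(world_size)]
        dist.gather(query, dst=0, gather_list=buckets, group=group)
        results = [q * 2 for q in buckets]  # stand-in for the real retrieval work
    else:
        dist.gather(query, dst=0, group=group)
        results = None
    out = torch.empty_like(query)
    dist.scatter(out, src=0, scatter_list=results, group=group)
    return out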
| 98
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
    required_optional_params = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_a(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
@property
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowerCAmelCase__ = MultilingualCLIP(__magic_name__ )
lowerCAmelCase__ = text_encoder.eval()
return text_encoder
@property
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            # boolean flags were elided in the source; values follow the stock
            # diffusers Kandinsky test and should be treated as assumptions
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0  # masked region reconstructed from the stock test; treat as an assumption
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0  # mask the area above the cat's head (reconstructed; treat as an assumption)
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="").to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 48
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = 'umt5'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None,
                 num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128,
                 dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu",
                 is_encoder_decoder=True, use_cache=True, tokenizer_class="T5Tokenizer", tie_word_embeddings=True,
                 pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, **kwargs):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
lowerCAmelCase__ = "past_encoder_sequence + sequence"
lowerCAmelCase__ = {0: "batch"}
lowerCAmelCase__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
return common_inputs
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
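# Standalone illustration of how `feed_forward_proj` is parsed into an activation
# name plus a gating flag in the config above (a sketch for clarity, not library code).
def parse_feed_forward_proj(feed_forward_proj: str):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"  # historical alias used by T5-family configs
    return dense_act_fn, is_gated_act

assert parse_feed_forward_proj("gated-gelu") == ("gelu_new", True)
assert parse_feed_forward_proj("relu") == ("relu", False)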
| 48
| 1
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""facebook/blenderbot_small-90M""": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__",
                 unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub('([.,!?()])', r' \1', token)
        token = re.sub('(\')', r' \1 ', token)
        token = re.sub(r'\s{2,}', ' ', token)
        if "\n" in token:
            token = token.replace('\n', ' __newln__')
        tokens = token.split(' ')
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '@@ '.join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> list:
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: list) -> str:
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: str = None):
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
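# Tiny walkthrough of the pair-extraction step that drives the BPE merge loop above:
# a word is a tuple of symbols, with "</w>" marking the end of the word.
word = ("h", "e", "l", "l", "o</w>")
assert get_pairs(word) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}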
| 701
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("""\n""".join(upper_files) + """\n""")
space_files = [file for file in filepaths if " " in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("""\n""".join(space_files) + """\n""")
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("""\n""".join(hyphen_files) + """\n""")
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("""\n""".join(nodir_files) + """\n""")
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 108
| 0
|
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('''Enter image url: ''').strip()
    print(F"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    image_data = requests.get(image_url).content
    file_name = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"Done. Image saved to disk as {file_name}.")
| 452
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16,
                 d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000,
                 same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True,
                 dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01,
                 init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 452
| 1
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36,
                 num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37,
                 hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2,
                 initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_sequence_classification_problem_types = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("""albert-base-v2""")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 714
|
"""simple docstring"""
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("""You cannot supply more or less than 2 values""")
    elif electron_conc < 0:
        raise ValueError("""Electron concentration cannot be negative in a semiconductor""")
    elif hole_conc < 0:
        raise ValueError("""Hole concentration cannot be negative in a semiconductor""")
    elif intrinsic_conc < 0:
        raise ValueError(
            """Intrinsic concentration cannot be negative in a semiconductor""")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
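# Worked example of the mass-action law n * p = n_i**2 encoded above: with two of
# the three concentrations supplied (the unknown passed as 0), the third follows.
name, value = carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
assert (name, value) == ("intrinsic_conc", 50.0)  # sqrt(25 * 100)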
| 635
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu",
                 resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02,
                 summary_type="cls_index", summary_use_proj=True, summary_activation=None,
                 summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 585
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 585
| 1
|
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'''{solution() = }''')
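# Derivation behind the closed form used above: from a + b + c = n and
# a^2 + b^2 = c^2, substitute c = n - a - b and solve for b, giving
#   b = (n^2 - 2*a*n) / (2*n - 2*a)
# Sanity check with the classic n = 1000 triplet (200, 375, 425):
a, n = 200, 1000
b = (n * n - 2 * a * n) // (2 * n - 2 * a)
c = n - a - b
assert (a, b, c) == (200, 375, 425) and a * a + b * b == c * c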
| 64
|
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("""Input value must be an 'int' type""")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
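# Sanity check: the shift-count above is exactly Python's built-in int.bit_length().
for value in (0, 1, 2, 255, 1024):
    assert get_highest_set_bit_position(value) == value.bit_length()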
| 64
| 1
|
"""simple docstring"""
def solution(limit: int = 28123) -> int:
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
if __name__ == "__main__":
print(solution())
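# The 28123 default is the classical analytic bound: every integer above it can be
# written as a sum of two abundant numbers, so only smaller values need checking.
def is_abundant(n: int) -> bool:
    return sum(d for d in range(1, n) if n % d == 0) > n

assert is_abundant(12) and not is_abundant(28)  # 28 is perfect, not abundant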
| 473
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class RoCBertConfig(PretrainedConfig):
    model_type = """roc_bert"""

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0,
                 position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True,
                 enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910,
                 shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 473
| 1
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ])
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
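# Minimal usage sketch of the criteria exercised above; in the transformers
# version these tests target, calling a StoppingCriteriaList returns a plain bool.
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=8)])
input_ids = torch.ones((1, 8), dtype=torch.long)
scores = torch.zeros((1, 8))
assert criteria(input_ids, scores)  # length reached, so generation should stop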
| 573
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase_ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_snake_case = KandinskyInpaintPipeline
_snake_case = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
_snake_case = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
_snake_case = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_snake_case = False
@property
def A__ ( self ) -> Any:
return 32
@property
def A__ ( self ) -> Tuple:
return 32
@property
def A__ ( self ) -> List[str]:
return self.time_input_dim
@property
def A__ ( self ) -> int:
return self.time_input_dim * 4
@property
def A__ ( self ) -> Optional[int]:
return 100
@property
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self ) -> Tuple:
torch.manual_seed(0 )
__lowerCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
__lowerCAmelCase = MultilingualCLIP(snake_case_ )
__lowerCAmelCase = text_encoder.eval()
return text_encoder
@property
def A__ ( self ) -> Dict:
torch.manual_seed(0 )
__lowerCAmelCase = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        __lowerCAmelCase = UNet2DConditionModel(**snake_case_ )
return model
@property
def A__ ( self ) -> Optional[int]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self ) -> Dict:
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self ) -> Tuple:
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = self.dummy_tokenizer
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=snake_case_ , )
__lowerCAmelCase = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("""RGB""").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0  # masked region reconstructed from the stock test; treat as an assumption
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self ) -> Dict:
__lowerCAmelCase = """cpu"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**snake_case_ )
__lowerCAmelCase = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(snake_case_ ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(snake_case_ ) , return_dict=snake_case_ , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def A__ ( self ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        __lowerCAmelCase = np.ones((768, 768) , dtype=np.float32 )
__lowerCAmelCase = 0
__lowerCAmelCase = """a hat"""
__lowerCAmelCase = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case_ )
__lowerCAmelCase = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
__lowerCAmelCase = pipeline.to(snake_case_ )
pipeline.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
snake_case_ , generator=snake_case_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__lowerCAmelCase = pipeline(
snake_case_ , image=snake_case_ , mask_image=snake_case_ , image_embeds=snake_case_ , negative_image_embeds=snake_case_ , generator=snake_case_ , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case_ , snake_case_ )
| 573
| 1
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ''' ,'''.join(
            F'''{item.key}: {item.val}''' for item in self._buckets if item)
        return F'''HashMap({val_string})'''
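# Usage sketch for the open-addressing HashMap above: writes grow the table once
# the load factor is exceeded, and deletions leave `_deleted` tombstones so that
# probe chains stay intact.
hm = HashMap()
for i in range(10):
    hm[F'''key{i}'''] = i
assert len(hm) == 10 and hm['''key3'''] == 3
del hm['''key3''']
assert len(hm) == 9 and '''key3''' not in list(hm)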
| 24
|
NUMBERS_PLUS_LETTER = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish NIF: 8 digits plus a mod-23 check letter."""
    if not isinstance(spanish_id, str):
        msg = F"""Expected string as input, found {type(spanish_id).__name__}"""
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace('''-''', '''''').upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
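# Checksum sketch: the control letter is simply the 8-digit number modulo 23,
# used as an index into LOOKUP_LETTERS (12345678 % 23 == 14 -> "Z").
assert LOOKUP_LETTERS[12345678 % 23] == "Z"
assert is_spain_national_id("12345678Z")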
import re
import time
from typing import Optional

import IPython.display as disp

from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length


def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3_600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code


class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))


class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()


class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values[self.first_column] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
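

# Minimal usage sketch (assumes a Jupyter/IPython frontend is attached; not part of
# the original module): `update` throttles HTML redraws, re-rendering roughly every
# `update_every` seconds once the initial `warmup` calls are exhausted.
if __name__ == "__main__":
    bar = NotebookProgressBar(100, prefix="demo")
    for step in range(1, 101):
        time.sleep(0.01)
        bar.update(step, comment=f"step {step}")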
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> List[str]:
if num <= 0:
raise ValueError("math domain error" )
return quad(lowerCamelCase_ , 0 , lowerCamelCase_ , args=(lowerCamelCase_) )[0]
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
return math.pow(lowerCamelCase_ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
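

# Sanity check (illustrative): for positive integers gamma(n) == (n - 1)!, so
# gamma(5) integrates x**4 * exp(-x) over [0, inf) and comes out at ~24.0.
if __name__ == "__main__":
    assert abs(gamma(5) - 24.0) < 1e-6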
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely and wrapped for 80 char width.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable to debug everything but the run itself, to do it fast and see the progress
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
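

# Illustrative check of the variation expansion performed in main() above (not part
# of the original script): two dimensions with 2 and 3 options produce the same six
# command-line variations listed in the header comment.
# >>> dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
# >>> list(map(str.strip, map(" ".join, itertools.product(*dims))))
# ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']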
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
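
# With the _LazyModule indirection above, a cheap import such as the config class
# does not pull in torch-heavy modeling code; that only happens on first attribute
# access (illustrative usage, assuming the package is installed):
# from transformers.models.autoformer import AutoformerConfig   # lightweight
# from transformers.models.autoformer import AutoformerModel    # triggers the lazy torch import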
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
    _import_structure["modeling_maskformer_swin"] = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get the next timestep value (= t + num_train_timesteps // num_inference_steps)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
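

# Minimal usage sketch (illustrative; a zero tensor stands in for a real UNet's
# noise prediction): the inverse scheduler walks the timesteps upward, mapping each
# sample from t to t + num_train_timesteps // num_inference_steps.
if __name__ == "__main__":
    scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps[:-1]:
        noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(noise_pred, int(t), sample).prev_sample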
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
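

# Usage sketch (illustrative graph, not part of the original file): a weighted
# triangle with edges 1-2 (w=1), 2-3 (w=2), 1-3 (w=3); Prim rooted at vertex 1
# keeps the two cheapest edges and discards the 1-3 edge.
def _demo_prim() -> None:
    g = [Vertex(i) for i in range(3)]
    connect(g, 1, 2, 1)
    connect(g, 2, 3, 2)
    connect(g, 1, 3, 3)
    assert prim(g, g[0]) == [(2, 1), (3, 2)]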
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
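

# Usage sketch (illustrative; downloads the bart-large-mnli checkpoint on first
# call): each candidate label is scored as the NLI hypothesis "This example is
# {label}", and the label with the highest entailment logit is returned.
# classifier = TextClassificationTool()
# classifier("A recipe for a delicious pasta dish.", labels=["cooking", "sports"])  # -> "cooking"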
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class a :
def __init__( self : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any]=13 , __lowerCAmelCase : Optional[int]=10 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : str=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : str=5 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Optional[Any]=10 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Optional[int]=0.9 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = tubelet_size
_UpperCAmelCase = num_frames
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = mask_ratio
_UpperCAmelCase = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
_UpperCAmelCase = int(mask_ratio * self.seq_length )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : str ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] ):
_UpperCAmelCase = VideoMAEModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = VideoMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_UpperCAmelCase = torch.ones((self.num_masks,) )
_UpperCAmelCase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
_UpperCAmelCase = mask.expand(self.batch_size , -1 ).bool()
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase )
# model only returns predictions for masked patches
_UpperCAmelCase = mask.sum().item()
_UpperCAmelCase = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_snake_case : int = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_snake_case : Optional[int] = False
_snake_case : Optional[int] = False
_snake_case : Dict = False
_snake_case : List[Any] = False
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = VideoMAEModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any]=False ):
_UpperCAmelCase = copy.deepcopy(__lowerCAmelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_UpperCAmelCase = torch.ones((self.model_tester.num_masks,) )
_UpperCAmelCase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_UpperCAmelCase = mask.expand(self.model_tester.batch_size , -1 ).bool()
_UpperCAmelCase = bool_masked_pos.to(__lowerCAmelCase )
if return_labels:
if model_class in [
*get_values(__lowerCAmelCase ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
return inputs_dict
def lowerCAmelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__lowerCAmelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = VideoMAEModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
if not self.has_attentions:
pass
else:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
_UpperCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
_UpperCAmelCase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_UpperCAmelCase = len(__lowerCAmelCase )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertEqual(out_len + 1 , len(__lowerCAmelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowerCAmelCase_ ( self : Dict ):
def check_hidden_states_output(__lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
_UpperCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
_UpperCAmelCase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase_ ( self : str ):
pass
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" ,filename="""eating_spaghetti.npy""" ,repo_type="""dataset""" )
_UpperCAmelCase = np.load(lowercase )
return list(lowercase )
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self : Union[str, Any] ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
__lowerCAmelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_video()
_UpperCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__lowerCAmelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
_UpperCAmelCase = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(__lowerCAmelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_video()
_UpperCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
# add boolean mask, indicating which patches to mask
_UpperCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
_UpperCAmelCase = torch.load(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__lowerCAmelCase )
# verify the logits
_UpperCAmelCase = torch.Size([1, 1408, 1536] )
_UpperCAmelCase = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=__lowerCAmelCase )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_UpperCAmelCase = torch.tensor([0.5_142] , device=__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , __lowerCAmelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_UpperCAmelCase = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=__lowerCAmelCase ).to(
__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(**__lowerCAmelCase )
        _UpperCAmelCase = torch.tensor([0.6_469] , device=__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , __lowerCAmelCase , atol=1e-4 ) )
| 277
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'timm_backbone'
def __init__( self : List[Any] , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Optional[Any] , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = backbone
_UpperCAmelCase = num_channels
_UpperCAmelCase = features_only
_UpperCAmelCase = use_pretrained_backbone
_UpperCAmelCase = True
_UpperCAmelCase = out_indices if out_indices is not None else (-1,)
| 277
| 1
|
"""simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    '''simple docstring'''
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number.')
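# Quick sanity check (my addition, not part of the original file): a 2x2 grid
# has C(4, 2) = 6 monotone lattice paths, and the default 20x20 grid gives the
# well-known Project Euler 15 answer:
#
#     assert solution(2) == 6
#     assert solution(20) == 137846528820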
| 132
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
a =logging.get_logger(__name__)
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
def run_func(__lowerCAmelCase ):
@wraps(__lowerCAmelCase )
def run_in_eager_mode(*__lowerCAmelCase , **__lowerCAmelCase ):
return func(*__lowerCAmelCase , **__lowerCAmelCase )
@wraps(__lowerCAmelCase )
@tf.function(experimental_compile=__lowerCAmelCase )
def run_in_graph_mode(*__lowerCAmelCase , **__lowerCAmelCase ):
return func(*__lowerCAmelCase , **__lowerCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> ["tf.Tensor"]:
'''simple docstring'''
lowerCamelCase__ =random.Random()
lowerCamelCase__ =[rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(__lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class __UpperCAmelCase ( __lowerCAmelCase ):
A__ : TensorFlowBenchmarkArguments
A__ : PretrainedConfig
A__ : str = "TensorFlow"
@property
def _a ( self ):
return tf.__version__
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
# initialize GPU on separate process
lowerCamelCase__ =self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowerCamelCase__ =self._prepare_inference_func(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return self._measure_speed(_inference )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ =self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowerCamelCase__ =self._prepare_train_func(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return self._measure_speed(_train )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _lowerCamelCase )
lowerCamelCase__ =self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowerCamelCase__ =self._prepare_inference_func(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return self._measure_memory(_inference )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _lowerCamelCase )
lowerCamelCase__ =self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowerCamelCase__ =self._prepare_train_func(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return self._measure_memory(_train )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ =self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
lowerCamelCase__ =(
hasattr(_lowerCamelCase , "architectures" )
and isinstance(config.architectures , _lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowerCamelCase__ ="TF" + config.architectures[0] # prepend 'TF' for tensorflow model
lowerCamelCase__ =__import__("transformers" , fromlist=[model_class] )
lowerCamelCase__ =getattr(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ =model_cls(_lowerCamelCase )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
lowerCamelCase__ =TF_MODEL_MAPPING[config.__class__](_lowerCamelCase )
# encoder-decoder has vocab size saved differently
lowerCamelCase__ =config.vocab_size if hasattr(_lowerCamelCase , "vocab_size" ) else config.encoder.vocab_size
lowerCamelCase__ =random_input_ids(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(_lowerCamelCase , decoder_input_ids=_lowerCamelCase , training=_lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(_lowerCamelCase , training=_lowerCamelCase )
lowerCamelCase__ =encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ =self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
lowerCamelCase__ =(
hasattr(_lowerCamelCase , "architectures" )
and isinstance(config.architectures , _lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowerCamelCase__ ="TF" + config.architectures[0] # prepend 'TF' for tensorflow model
lowerCamelCase__ =__import__("transformers" , fromlist=[model_class] )
lowerCamelCase__ =getattr(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ =model_cls(_lowerCamelCase )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
lowerCamelCase__ =TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_lowerCamelCase )
# encoder-decoder has vocab size saved differently
lowerCamelCase__ =config.vocab_size if hasattr(_lowerCamelCase , "vocab_size" ) else config.encoder.vocab_size
lowerCamelCase__ =random_input_ids(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
lowerCamelCase__ =model(_lowerCamelCase , decoder_input_ids=_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase )[0]
lowerCamelCase__ =tf.gradients(_lowerCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
lowerCamelCase__ =model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase )[0]
lowerCamelCase__ =tf.gradients(_lowerCamelCase , model.trainable_variables )
return gradients
lowerCamelCase__ =encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def _a ( self , _lowerCamelCase ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run an additional 5 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(_lowerCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
lowerCamelCase__ =timeit.repeat(
_lowerCamelCase , repeat=self.args.repeat , number=10 , )
return min(_lowerCamelCase ) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
def _a ( self , _lowerCamelCase ):
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
lowerCamelCase__ =start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
lowerCamelCase__ ="N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
lowerCamelCase__ =nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
lowerCamelCase__ =nvml.nvmlDeviceGetMemoryInfo(_lowerCamelCase )
lowerCamelCase__ =meminfo.used
lowerCamelCase__ =Memory(_lowerCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
lowerCamelCase__ =None
else:
lowerCamelCase__ =measure_peak_memory_cpu(_lowerCamelCase )
lowerCamelCase__ =Memory(_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
lowerCamelCase__ =stop_memory_tracing(_lowerCamelCase )
if memory is None:
lowerCamelCase__ =summary.total
else:
lowerCamelCase__ =None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 132
| 1
|
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    '''simple docstring'''
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
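# Sanity-check sketch (my addition, not part of the original file): _modexpt
# implements exponentiation by squaring and should agree with Python's
# built-in three-argument pow, which is also the faster choice in practice:
#
#     assert _modexpt(3, 17, 1000) == pow(3, 17, 1000)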
| 35
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
A : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
A : Dict = 5
A : Optional[int] = 10
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = SpeechaTextTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = True
def __A ( self : Optional[Any] ) -> str:
super().setUp()
SCREAMING_SNAKE_CASE_ = sp.SentencePieceProcessor()
spm_model.Load(__magic_name__ )
SCREAMING_SNAKE_CASE_ = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )]
SCREAMING_SNAKE_CASE_ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
SCREAMING_SNAKE_CASE_ = Path(self.tmpdirname )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
SCREAMING_SNAKE_CASE_ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE_ = "<pad>"
SCREAMING_SNAKE_CASE_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def __A ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__magic_name__ ) , 1_001 )
def __A ( self : List[Any] ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1_001 )
def __A ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__magic_name__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [289, 50, 14, 174, 386] , )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def __A ( self : List[str] ) -> List[Any]:
# fmt: off
SCREAMING_SNAKE_CASE_ = {"input_ids": [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = '''valhalla/s2t_mustc_multilinguial_medium'''
lowerCamelCase__ = '''C\'est trop cool'''
lowerCamelCase__ = '''Esto es genial'''
@classmethod
def __A ( cls : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def __A ( self : str ) -> int:
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def __A ( self : List[Any] ) -> Union[str, Any]:
self.assertEqual(self.tokenizer.vocab_size , 10_000 )
def __A ( self : Any ) -> int:
self.assertIn(__magic_name__ , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE_ = [ES_CODE, 4, 1_601, 47, 7_647, 2]
SCREAMING_SNAKE_CASE_ = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertNotIn(self.tokenizer.eos_token , __magic_name__ )
def __A ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE_ = "fr"
SCREAMING_SNAKE_CASE_ = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __magic_name__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def __A ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
SCREAMING_SNAKE_CASE_ = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 140
| 0
|
def get_data(source_data: list) -> list[list[float]]:
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
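# Usage sketch (my addition, not part of the original file). With weight 1 a
# higher raw value scores better; with weight 0 a lower raw value scores
# better. Each row gets its combined score appended in place:
#
#     vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#     procentual_proximity(vehicles, [0, 0, 1])
#     # -> every inner list now ends with an aggregate score in [0, len(weights)]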
| 175
|
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMTaModel(FlaxTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig
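# Quick demonstration (my addition, not part of the original file): shifting
# [[5, 6, 7]] right with pad_token_id=0 and decoder_start_token_id=0 yields
# [[0, 5, 6]]; any -100 label sentinel in the result would be replaced by
# the pad id:
#
#     ids = jnp.array([[5, 6, 7]])
#     shift_tokens_right(ids, pad_token_id=0, decoder_start_token_id=0)
#     # -> [[0, 5, 6]]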
| 175
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class A_ ( _a ):
lowerCAmelCase__ = 'open-llama'
def __init__( self: List[Any] ,__lowerCAmelCase: int=100_000 ,__lowerCAmelCase: Optional[int]=4_096 ,__lowerCAmelCase: Optional[int]=11_008 ,__lowerCAmelCase: Tuple=32 ,__lowerCAmelCase: Union[str, Any]=32 ,__lowerCAmelCase: int="silu" ,__lowerCAmelCase: Dict=2_048 ,__lowerCAmelCase: Dict=0.02 ,__lowerCAmelCase: Any=1e-6 ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Optional[Any]=0 ,__lowerCAmelCase: List[Any]=1 ,__lowerCAmelCase: Optional[Any]=2 ,__lowerCAmelCase: int=False ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Any=0.1 ,__lowerCAmelCase: Optional[int]=0.1 ,__lowerCAmelCase: str=True ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: Dict=None ,**__lowerCAmelCase: Union[str, Any] ,):
'''simple docstring'''
_lowerCamelCase : str = vocab_size
_lowerCamelCase : Optional[int] = max_position_embeddings
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : List[Any] = num_hidden_layers
_lowerCamelCase : List[Any] = num_attention_heads
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : Any = rms_norm_eps
_lowerCamelCase : Optional[Any] = use_cache
_lowerCamelCase : str = kwargs.pop(
"use_memorry_efficient_attention" ,__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_dropout_prob
_lowerCamelCase : Optional[int] = use_stable_embedding
_lowerCamelCase : List[Any] = shared_input_output_embedding
_lowerCamelCase : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,tie_word_embeddings=__lowerCAmelCase ,**__lowerCAmelCase ,)
def _lowercase ( self: List[str] ):
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,__lowerCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
F"""got {self.rope_scaling}""" )
_lowerCamelCase : Optional[Any] = self.rope_scaling.get("type" ,__lowerCAmelCase )
_lowerCamelCase : int = self.rope_scaling.get("factor" ,__lowerCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__lowerCAmelCase ,__lowerCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 46
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """simple docstring"""
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
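# Usage sketch (my addition, not part of the original file): sigmoid maps any
# real input elementwise into (0, 1), with sigmoid(0) == 0.5:
#
#     sigmoid(np.array([-1.0, 0.0, 1.0]))
#     # -> array([0.26894142, 0.5, 0.73105858])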
| 43
| 0
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCamelCase ( unittest.TestCase ):
def snake_case_ ( self : str ) -> int:
debug_launcher(test_script.main )
def snake_case_ ( self : Optional[int] ) -> List[Any]:
debug_launcher(test_ops.main )
| 249
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self : Any , __snake_case : int , __snake_case : Optional[Any]=13 , __snake_case : int=7 , __snake_case : Dict=True , __snake_case : str=True , __snake_case : List[str]=True , __snake_case : int=True , __snake_case : str=99 , __snake_case : Dict=24 , __snake_case : int=2 , __snake_case : Dict=6 , __snake_case : str=37 , __snake_case : str="gelu" , __snake_case : List[str]=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Optional[int]=512 , __snake_case : Any=16 , __snake_case : Optional[int]=2 , __snake_case : List[Any]=0.02 , __snake_case : str=3 , __snake_case : List[Any]=None , __snake_case : Any=1000 , ) -> str:
_a : Dict = parent
_a : Tuple = batch_size
_a : Optional[int] = seq_length
_a : Optional[int] = is_training
_a : Dict = use_input_mask
_a : Optional[Any] = use_token_type_ids
_a : List[Any] = use_labels
_a : List[str] = vocab_size
_a : int = hidden_size
_a : List[str] = num_hidden_layers
_a : Optional[Any] = num_attention_heads
_a : Dict = intermediate_size
_a : str = hidden_act
_a : str = hidden_dropout_prob
_a : Union[str, Any] = attention_probs_dropout_prob
_a : Tuple = max_position_embeddings
_a : List[str] = type_vocab_size
_a : List[Any] = type_sequence_label_size
_a : Optional[int] = initializer_range
_a : str = num_labels
_a : int = scope
_a : Tuple = range_bbox
def snake_case_ ( self : Any ) -> Any:
_a : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : str = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_a : Any = bbox[i, j, 3]
_a : Any = bbox[i, j, 1]
_a : Optional[int] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_a : int = bbox[i, j, 2]
_a : str = bbox[i, j, 0]
_a : List[Any] = t
_a : Any = None
if self.use_input_mask:
_a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_a : Optional[Any] = None
if self.use_token_type_ids:
_a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : Any = None
_a : Union[str, Any] = None
if self.use_labels:
_a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : List[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case_ ( self : str ) -> List[str]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def snake_case_ ( self : int , __snake_case : List[str] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Any , __snake_case : int , ) -> Any:
_a : Union[str, Any] = LiltModel(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : Union[str, Any] = model(__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
_a : List[Any] = model(__snake_case , bbox=__snake_case , token_type_ids=__snake_case )
_a : List[Any] = model(__snake_case , bbox=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case_ ( self : Dict , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : int , ) -> Tuple:
_a : List[str] = self.num_labels
_a : Optional[Any] = LiltForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : Optional[Any] = model(
__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self : Tuple , __snake_case : int , __snake_case : int , __snake_case : List[str] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Optional[int] , ) -> Optional[int]:
_a : List[str] = LiltForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : int = model(
__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self : Any ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCAmelCase : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase : Optional[Any] = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase : Any = False
UpperCAmelCase : Optional[Any] = False
def snake_case_ ( self : Optional[int] , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Dict , __snake_case : Optional[int] ) -> List[str]:
return True
def snake_case_ ( self : int ) -> Dict:
_a : Union[str, Any] = LiltModelTester(self )
_a : List[str] = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def snake_case_ ( self : Dict ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def snake_case_ ( self : str ) -> Tuple:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case_ ( self : str ) -> str:
_a : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a : Any = type
self.model_tester.create_and_check_model(*__snake_case )
def snake_case_ ( self : Optional[Any] ) -> List[Any]:
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
def snake_case_ ( self : List[Any] ) -> List[Any]:
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
@slow
def snake_case_ ( self : Dict ) -> Optional[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : List[str] = LiltModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
@slow
class lowerCamelCase ( unittest.TestCase ):
def snake_case_ ( self : Optional[Any] ) -> str:
_a : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__snake_case )
_a : List[str] = torch.tensor([[1, 2]] , device=__snake_case )
_a : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__snake_case )
# forward pass
with torch.no_grad():
_a : List[Any] = model(input_ids=__snake_case , bbox=__snake_case )
_a : Optional[Any] = torch.Size([1, 2, 768] )
_a : Optional[Any] = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__snake_case , )
self.assertTrue(outputs.last_hidden_state.shape , __snake_case )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __snake_case , atol=1E-3 ) )
| 249
| 1
|
__lowerCamelCase : Any = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
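# Worked example (my addition, not part of the original file). The algorithm
# ignores "(" (RULE 3), handles single-digit operands only, and expects fully
# parenthesized input, e.g.:
#
#     dijkstras_two_stack_algorithm("((9 - 3) / (2 + 1))")  # -> 2.0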
| 323
|
import os
from datetime import datetime as dt
from github import Github
__lowerCamelCase : Optional[int] = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda comment: comment.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 323
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        """simple docstring"""
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        """simple docstring"""
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """simple docstring"""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
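# Alternative sketch (my addition, not part of the original file): Floyd's
# tortoise-and-hare algorithm detects a loop in O(1) extra space, instead of
# the O(n) visited list (and its linear `node in visited` scans) used above.
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast and fast.next_node:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False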
| 295
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ = DiTPipeline
A__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
A__ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
A__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A__ = False
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : List[str] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case__ , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=snake_case__ , )
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoencoderKL()
_SCREAMING_SNAKE_CASE : Union[str, Any] = DDIMScheduler()
_SCREAMING_SNAKE_CASE : Dict = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith("mps" ):
_SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(snake_case__ )
else:
_SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_SCREAMING_SNAKE_CASE : List[str] = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = "cpu"
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
_SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_inputs(snake_case__ )
_SCREAMING_SNAKE_CASE : List[str] = pipe(**snake_case__ ).images
_SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_SCREAMING_SNAKE_CASE : List[str] = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case__ , 1E-3 )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=snake_case__ , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCamelCase ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : str = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
_SCREAMING_SNAKE_CASE : Optional[int] = ["vase", "umbrella", "white shark", "white wolf"]
_SCREAMING_SNAKE_CASE : Any = pipe.get_label_ids(snake_case__ )
_SCREAMING_SNAKE_CASE : List[str] = pipe(snake_case__ , generator=snake_case__ , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(snake_case__ , snake_case__ ):
_SCREAMING_SNAKE_CASE : int = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
_SCREAMING_SNAKE_CASE : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
_SCREAMING_SNAKE_CASE : List[Any] = ["vase", "umbrella"]
_SCREAMING_SNAKE_CASE : int = pipe.get_label_ids(snake_case__ )
_SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Tuple = pipe(snake_case__ , generator=snake_case__ , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(snake_case__ , snake_case__ ):
_SCREAMING_SNAKE_CASE : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 295
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[str] = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class a__( snake_case__ ):
a_ : Union[str, Any] = '''trajectory_transformer'''
a_ : Optional[int] = ['''past_key_values''']
a_ : Dict = {
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _UpperCAmelCase=100 , _UpperCAmelCase=5 , _UpperCAmelCase=1 , _UpperCAmelCase=1 , _UpperCAmelCase=249 , _UpperCAmelCase=6 , _UpperCAmelCase=17 , _UpperCAmelCase=25 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=128 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0_006 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=1 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=5_0256 , _UpperCAmelCase=5_0256 , **_UpperCAmelCase , ) -> Optional[int]:
snake_case__ =vocab_size
snake_case__ =action_weight
snake_case__ =reward_weight
snake_case__ =value_weight
snake_case__ =max_position_embeddings
snake_case__ =block_size
snake_case__ =action_dim
snake_case__ =observation_dim
snake_case__ =transition_dim
snake_case__ =learning_rate
snake_case__ =n_layer
snake_case__ =n_head
snake_case__ =n_embd
snake_case__ =embd_pdrop
snake_case__ =attn_pdrop
snake_case__ =resid_pdrop
snake_case__ =initializer_range
snake_case__ =layer_norm_eps
snake_case__ =kaiming_initializer_range
snake_case__ =use_cache
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
| 538
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 538
| 1
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowerCamelCase ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase : List[str] = 'char'
UpperCAmelCase : Any = 'bpe'
UpperCAmelCase : Optional[int] = 'wp'
__UpperCAmelCase : Optional[int] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowerCamelCase ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Tuple = ['image_processor', 'char_tokenizer']
UpperCAmelCase : Any = 'ViTImageProcessor'
UpperCAmelCase : Optional[int] = 'MgpstrTokenizer'
def __init__( self : Union[str, Any] , __snake_case : Any=None , __snake_case : List[Any]=None , **__snake_case : Optional[Any] ) -> List[str]:
_a : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
_a : Any = kwargs.pop('''feature_extractor''' )
_a : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
_a : Union[str, Any] = tokenizer
_a : Tuple = AutoTokenizer.from_pretrained('''gpt2''' )
_a : List[Any] = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
super().__init__(__snake_case , __snake_case )
def __call__( self : List[str] , __snake_case : Optional[Any]=None , __snake_case : Optional[int]=None , __snake_case : Tuple=None , **__snake_case : Any ) -> List[str]:
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
_a : Optional[Any] = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is not None:
_a : Any = self.char_tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is None:
return inputs
elif images is None:
return encodings
else:
_a : Optional[Any] = encodings['''input_ids''']
return inputs
def snake_case_ ( self : Optional[int] , __snake_case : List[Any] ) -> Tuple:
_a , _a , _a : List[str] = sequences
_a : str = char_preds.size(0 )
_a , _a : Dict = self._decode_helper(__snake_case , '''char''' )
_a , _a : Optional[int] = self._decode_helper(__snake_case , '''bpe''' )
_a , _a : int = self._decode_helper(__snake_case , '''wp''' )
_a : Any = []
_a : int = []
for i in range(__snake_case ):
_a : List[Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
_a : Tuple = [char_strs[i], bpe_strs[i], wp_strs[i]]
_a : List[str] = scores.index(max(__snake_case ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_a : Dict = {}
_a : Optional[Any] = final_strs
_a : List[str] = final_scores
_a : List[Any] = char_strs
_a : int = bpe_strs
_a : Tuple = wp_strs
return out
def snake_case_ ( self : Tuple , __snake_case : str , __snake_case : Tuple ) -> Any:
if format == DecodeType.CHARACTER:
_a : Any = self.char_decode
_a : Optional[Any] = 1
_a : Union[str, Any] = '''[s]'''
elif format == DecodeType.BPE:
_a : List[str] = self.bpe_decode
_a : Tuple = 2
_a : Union[str, Any] = '''#'''
elif format == DecodeType.WORDPIECE:
_a : Union[str, Any] = self.wp_decode
_a : Optional[int] = 102
_a : Tuple = '''[SEP]'''
else:
raise ValueError(f"""Format {format} is not supported.""" )
_a , _a : Optional[Any] = [], []
_a : List[Any] = pred_logits.size(0 )
_a : Optional[int] = pred_logits.size(1 )
_a , _a : List[str] = pred_logits.topk(1 , dim=-1 , largest=__snake_case , sorted=__snake_case )
_a : int = preds_index.view(-1 , __snake_case )[:, 1:]
_a : int = decoder(__snake_case )
_a , _a : Optional[int] = torch.nn.functional.softmax(__snake_case , dim=2 ).max(dim=2 )
_a : str = preds_max_prob[:, 1:]
for index in range(__snake_case ):
_a : Tuple = preds_str[index].find(__snake_case )
_a : Optional[Any] = preds_str[index][:pred_eos]
_a : int = preds_index[index].cpu().tolist()
_a : int = pred_index.index(__snake_case ) if eos_token in pred_index else -1
_a : Union[str, Any] = preds_max_prob[index][: pred_eos_index + 1]
_a : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__snake_case )
conf_scores.append(__snake_case )
return dec_strs, conf_scores
def snake_case_ ( self : Dict , __snake_case : Union[str, Any] ) -> List[Any]:
_a : List[str] = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(__snake_case )]
return decode_strs
def snake_case_ ( self : Union[str, Any] , __snake_case : Optional[Any] ) -> Tuple:
return self.bpe_tokenizer.batch_decode(__snake_case )
def snake_case_ ( self : List[Any] , __snake_case : List[Any] ) -> Union[str, Any]:
_a : str = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(__snake_case )]
return decode_strs
| 249
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
    'YituTech/conv-bert-medium-small': (
        'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
    ),
    'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = 'convbert'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )
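

# --- Hedged usage sketch (added; not part of the original configuration file) ---
# A minimal round-trip: build a small config and inspect the ONNX input axes that
# export code consumes. The reduced sizes are arbitrary assumptions.
if __name__ == "__main__":
    config = ConvBertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=4)
    onnx_config = ConvBertOnnxConfig(config)
    print(config.model_type)         # -> convbert
    print(dict(onnx_config.inputs))  # -> axes for input_ids / attention_mask / token_type_ids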
| 249
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        # Resize so the shortest edge matches size["shortest_edge"], keeping aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
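

# --- Hedged usage sketch (added; not part of the original file) ---
# Runs the processor end to end on a synthetic PIL image; no checkpoint needed.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image as PILImage

    processor = CLIPImageProcessor()
    fake = PILImage.fromarray(np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8))
    batch = processor.preprocess(fake, return_tensors="np")
    print(batch["pixel_values"].shape)  # -> (1, 3, 224, 224)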
| 359
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of small random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
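

# --- Hedged run note (added; not part of the original test file) ---
# In a transformers checkout these run under pytest, e.g.:
#   pytest tests/models/owlvit/test_processor_owlvit.py -k tokenizer_decode
# Tests using local fixtures need nothing else; the google/owlvit-base-patch32
# cases download the real tokenizer. The path above is an assumption.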
| 401
| 0
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
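

# --- Hedged usage sketch (added; not part of the original __init__) ---
# The most common entry point re-exported above: dynamic padding at batch time.
# The checkpoint name is an illustrative assumption.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    collator = DataCollatorWithPadding(tokenizer=tokenizer)
    batch = collator([tokenizer("a longer first sentence"), tokenizer("short")])
    print(batch["input_ids"].shape)  # both rows padded to the longer one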
| 707
|
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
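

# --- Hedged usage sketch (added; not part of the original test file) ---
# What cached_path does outside the harness: local paths pass through unchanged,
# URLs are downloaded into (and reused from) the cache directory.
def _demo_cached_path(tmp_dir: str = "/tmp/hf_cache_demo"):
    download_config = DownloadConfig(cache_dir=tmp_dir)
    resolved = cached_path(__file__, download_config=download_config)
    print(resolved)  # a local file resolves to itself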
| 134
| 0
|
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n choose k: the number of ways to choose k items from n."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"""If a class of 40 students must be arranged into groups of""",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f'''are {combinations(10, 3)} ways that first, second and''',
"""third place can be awarded.""",
)
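    # Hedged mini-check (added): binomial symmetry, C(n, k) == C(n, n - k).
    assert combinations(52, 5) == combinations(52, 47)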
| 379
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
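
# --- Hedged usage note (added; not part of the original __init__) ---
# With sys.modules patched to the _LazyModule, submodule imports are deferred:
#   from transformers.models.gpt_neox import GPTNeoXConfig  # light import
#   from transformers.models.gpt_neox import GPTNeoXModel   # loads torch code on first access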
| 379
| 1
|
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
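

# --- Hedged quickstart (added; not part of the original test file) ---
# The pipeline exercised above, outside the test harness:
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="cats.png", question="How many cats are there?", top_k=2)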
| 714
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 594
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 107
|
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Trial division using the fact that primes > 3 have the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
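    # Hedged mini-check (added): brute-force agreement on a small bound.
    assert solution(10) == 17  # 2 + 3 + 5 + 7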
| 588
| 0
|
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number` (bottom-up DP)."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
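    # Hedged examples (added): 12 = 4 + 4 + 4 -> 3 squares; 13 = 9 + 4 -> 2 squares.
    print(minimum_squares_to_represent_a_number(12))  # 3
    print(minimum_squares_to_represent_a_number(13))  # 2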
| 706
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.emoji_file, 'w') as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = 'こんにちは、世界。 こんばんは、㔺界。'
        expected_token = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        expected_text = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')

        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        expected_text = 'こんにちは、世界。こんばんは、世界。😀'
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode('', prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')

        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer('', prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')

        x_token_1 = tokenizer.encode('あンいワ')
        x_token_2 = tokenizer.encode('', prefix_text='あンいワ')
        x_token_3 = tokenizer.encode('いワ', prefix_text='あン')

        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')

        input_pairs = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 345
| 0
|
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l à</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])

        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            for token in vocab_tokens:
                fp.write(f'{token} {vocab_tokens[token]}\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'Tôi là VinAI Research'
        output_text = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'Tôi là VinAI Research'
        bpe_tokens = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 434
|
"""simple docstring"""
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
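

# --- Hedged sketch (added; not part of the original file) ---
# These read as hand-tuned inference-timestep tables for a 1000-step diffusion
# model (fast/smart/super schedules in the DeepFloyd-IF style). A uniform stride
# reproduces the simplest baseline; the tables above instead cluster steps where
# they help most. The helper name below is ours, not part of the original file.
def uniform_timesteps(num_inference_steps: int, num_train_timesteps: int = 1000) -> list:
    """Evenly strided timestep schedule, highest noise level first."""
    step = num_train_timesteps // num_inference_steps
    return list(range(0, num_train_timesteps, step))[::-1]


if __name__ == "__main__":
    print(uniform_timesteps(10))  # [900, 800, 700, 600, 500, 400, 300, 200, 100, 0]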
| 434
| 1
|
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Paths and flip direction for a YOLO-format dataset; fill in before running.
LABEL_DIR = ''
IMAGE_DIR = ''
OUTPUT_DIR = ''
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print('Processing...')
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
        cv2.imwrite(f'/{file_root}.jpg', image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f'Success {index + 1}/{len(new_images)} with {file_name}')
        annos_list = []
        for anno in new_annos[index]:
            obj = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
            annos_list.append(obj)
        with open(f'/{file_root}.txt', 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO label files and pair each one with its image path."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f'{label_name}.jpg')

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip each image and mirror the matching normalized bounding-box centers."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip mirrors x_center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip mirrors y_center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Random lowercase+digit suffix so flipped files never collide."""
    assert number_char > 1, 'The number of character should greater than 1'
    letter_code = ascii_lowercase + digits
    return ''.join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print('DONE ✅')
| 666
|
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
    '`from accelerate import find_executable_batch_size` to avoid this warning.',
    FutureWarning,
)
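
# --- Hedged note (added; not part of the original shim) ---
# The forward-compatible import the warning recommends:
#   from accelerate import find_executable_batch_size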
| 666
| 1
|