from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Given two of the three carrier concentrations of a semiconductor, find the
    missing one. Exactly one argument must be passed as 0 (the unknown value).
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        # mass action law: n * p = n_i**2
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
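
# Usage sketch (the numeric values are made up for illustration, not taken from
# the original module): solving for the electron concentration from the mass
# action law n * p = n_i**2.
#
#     name, value = carrier_concentration(
#         electron_conc=0, hole_conc=1e18, intrinsic_conc=1.5e10
#     )
#     print(name, value)  # -> "electron_conc" 225.0  ((1.5e10)**2 / 1e18)
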
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test: for an odd prime p, 2**p - 1 is a Mersenne
    prime if and only if this returns True (p == 2 is handled as a special case).
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
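
# Quick sanity check (a sketch, not part of the original file): every exponent
# below is prime, and each line should print True except p=11, whose Mersenne
# number 2**11 - 1 = 2047 = 23 * 89 is composite.
#
#     for p in (3, 5, 7, 11, 13, 17, 19):
#         print(p, lucas_lehmer_test(p))
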
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
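
# Typical invocations (sketch): the subcommand is normally reached through the
# `accelerate` CLI entry point rather than by running this module directly.
#
#     accelerate env
#     accelerate env --config_file /path/to/config.yaml
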
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
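
# Migration sketch for downstream code (the checkpoint name is illustrative):
#
#     from transformers import OwlViTImageProcessor
#
#     processor = OwlViTImageProcessor.from_pretrained("google/owlvit-base-patch32")
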
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
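
# How the lazy pattern above behaves, in miniature (a standalone sketch, not
# the actual `_LazyModule` implementation): attribute access triggers the real
# submodule import, so importing the package stays cheap.
#
#     import importlib
#
#     class LazyModule:
#         def __init__(self, name, import_structure):
#             self._name = name
#             # map attribute -> submodule that defines it
#             self._attr_to_module = {
#                 attr: mod for mod, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             module = importlib.import_module(f"{self._name}.{self._attr_to_module[attr]}")
#             return getattr(module, attr)
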
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
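
# Usage sketch: instantiating the default (SegFormer-B0-sized) configuration
# and reading back one of the defaults listed above.
#
#     config = SegformerConfig(semantic_loss_ignore_index=255)
#     print(config.hidden_sizes)  # [32, 64, 160, 256]
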
import copy
import tempfile
import unittest

from transformers import M2M100Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import M2M100ForConditionalGeneration, M2M100Model, M2M100Tokenizer
    from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder, M2M100Encoder


def prepare_m2m_100_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
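
# What the default-mask branch above computes, on a toy example (a sketch; the
# pad id of 1 matches the tester defaults below):
#
#     ids = torch.tensor([[5, 9, 1, 1]])
#     print(ids.ne(1))  # tensor([[ True,  True, False, False]])
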
class M2M100ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_m2m_100_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return M2M100Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = M2M100Model(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = M2M100Model(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = M2M100Encoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = M2M100Decoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            M2M100Model,
            M2M100ForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (M2M100ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": M2M100ForConditionalGeneration,
            "feature-extraction": M2M100Model,
            "summarization": M2M100ForConditionalGeneration,
            "text2text-generation": M2M100ForConditionalGeneration,
            "translation": M2M100ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = M2M100ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=M2M100Config)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (M2M100Model, M2M100ForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = M2M100ForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class M2M100ModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = M2M100Model.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
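
# Expansion sketch: with the default attention_types=[[["global", "local"], 12]],
# the staticmethod above repeats the two-layer pattern 12 times, yielding the
# 24 alternating entries required by num_layers=24:
#
#     GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])
#     # -> ["global", "local", "global", "local", ..., "global", "local"]
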
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
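
# Behaviour sketch: for a 1-D tensor this matches torch.Tensor.unfold, e.g.
#
#     x = torch.arange(6)
#     custom_unfold(x, dimension=0, size=2, step=2)
#     # -> tensor([[0, 1], [2, 3], [4, 5]]), same as x.unfold(0, 2, 2)
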
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """ONNX-exportable way to find the largest divisor of seq_length smaller than window_size."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
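
# Preprocessing sketch (the file name and URL fragment are illustrative):
# resize to the checkpoint's expected max side, then normalize with ImageNet
# statistics, matching the two helpers above.
#
#     from PIL import Image
#
#     img = Image.open("example_table.png").convert("RGB")
#     pixel_values = normalize(resize(img, "...detection_detr_r18.pth")).unsqueeze(0)
#     print(pixel_values.shape)  # torch.Size([1, 3, H, W])
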
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    filepath = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(filepath).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
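
# Example invocations (a sketch; the script file name and output paths are
# illustrative, not taken from the original file):
#
#     python convert_table_transformer_checkpoint.py \
#         --pytorch_dump_folder_path ./table-transformer-detection
#
#     python convert_table_transformer_checkpoint.py \
#         --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth \
#         --pytorch_dump_folder_path ./table-transformer-structure-recognition --push_to_hub
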
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        # ground-truth path: emulate gradient accumulation manually
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
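
# The pattern under test, in miniature (a sketch, not an additional test case):
# `no_sync` defers inter-process gradient synchronization, so gradients
# accumulate locally until a step is taken outside the context manager.
#
#     accelerator = Accelerator()
#     model, ddp_model, dataloader = get_training_setup(accelerator)
#     ddp_input, ddp_target = next(iter(dataloader)).values()
#     with accelerator.no_sync(ddp_model):  # local accumulation only
#         step_model(ddp_model, ddp_input, ddp_target, accelerator)
#     step_model(ddp_model, ddp_input, ddp_target, accelerator)  # syncs here
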
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Perform gradient accumulation under the wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Grads should only be in sync on accumulation boundaries or at the end of the dataloader
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
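For orientation, a minimal self-contained sketch of the `accelerator.accumulate` API these tests exercise (runs on a single process too; the tiny linear model and random data are placeholders, not the test helpers above):

import torch
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)
x, y = torch.randn(16, 1), torch.randn(16, 1)
for step in range(4):
    # Gradient sync (and the effective optimizer step) only lands on accumulation boundaries
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(x.to(accelerator.device)), y.to(accelerator.device))
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()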
| 287 | 1 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="[SEP]", eos_token="[SEP]", sep_token="[SEP]", unk_token="[UNK]",
                 pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
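A hedged usage sketch of the fairseq-offset bookkeeping above (the local `prophetnet.tokenizer` path is a placeholder; a real SentencePiece model file is required):

# Illustrative only: assumes an actual SentencePiece model at this path.
tokenizer = XLMProphetNetTokenizer("prophetnet.tokenizer")
ids = tokenizer.convert_tokens_to_ids(["[CLS]", "▁de", "[SEP]"])
# "[CLS]"/"[SEP]" resolve through fairseq_tokens_to_ids; plain spm pieces are shifted by fairseq_offset (12)
print(ids, tokenizer.convert_ids_to_tokens(ids))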
| 287 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 287 | 1 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCamelCase =logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders over noisy y = a*x + b data to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`, returning the random values drawn along the way"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class DummyModel(nn.Module):
    "Simple model computing y = a*x + b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            # Save second state; with `total_limit=1` only one checkpoint may remain
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; with `total_limit=2` only the last two survive
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
_lowerCamelCase ="""/tmp/accelerate/state_checkpointing"""
_lowerCamelCase =DummyModel()
_lowerCamelCase =torch.optim.Adam(params=model.parameters(), lr=1E-3)
_lowerCamelCase =torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCamelCase , _lowerCamelCase =dummy_dataloaders()
_lowerCamelCase =ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCamelCase =Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase =accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCamelCase , _lowerCamelCase =accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCamelCase =group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
_lowerCamelCase =model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
_lowerCamelCase =group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
_lowerCamelCase =group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
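As a supplement, a small hedged sketch of `register_for_checkpointing`, which the invalid-registration test above probes: any object exposing `state_dict`/`load_state_dict` qualifies (the `StepCounter` class is invented for illustration):

from accelerate import Accelerator

class StepCounter:
    # Minimal stateful object: state_dict/load_state_dict make it checkpointable.
    def __init__(self):
        self.steps = 0

    def state_dict(self):
        return {"steps": self.steps}

    def load_state_dict(self, state):
        self.steps = state["steps"]

accelerator = Accelerator()
counter = StepCounter()
accelerator.register_for_checkpointing(counter)  # saved/restored by save_state()/load_state()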
| 287 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300,
        encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8,
        decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False,
        position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True,
        dilation=False, class_cost=2, bbox_cost=5, giou_cost=2,
        mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2,
        bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
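A brief hedged sketch of round-tripping the config above (values are illustrative, not recommended settings):

config = ConditionalDetrConfig(num_queries=100, dilation=True)
d = config.to_dict()
print(d["model_type"], d["num_queries"])  # conditional_detr 100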
| 287 | 1 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
            # fmt: off
            expected_encoding = {
"""input_ids""": [
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
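For reference, a hedged sketch of the encode/decode round trip these integration checks rely on (downloads `microsoft/deberta-base` on first use):

from transformers import DebertaTokenizer

tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
enc = tokenizer("lower newer")
print(enc["input_ids"])
print(tokenizer.decode(enc["input_ids"], skip_special_tokens=True))  # lower newer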
| 287 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
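A hedged sketch of the JSON shape this script expects, exercised end to end in a temporary directory (the benchmark key and numbers are invented):

import json, os, tempfile

sample = {"benchmarks/benchmark_array_xd.json": {"read_batch_100": {"new": 0.123456, "old": 0.234567, "diff": -0.111111}}}
with tempfile.TemporaryDirectory() as d:
    src, dst = os.path.join(d, "results.json"), os.path.join(d, "results.md")
    with open(src, "w", encoding="utf-8") as f:
        json.dump(sample, f)
    format_json_to_md(src, dst)
    with open(dst, encoding="utf-8") as f:
        print(f.read())  # a <details> block with one markdown table per benchmark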
| 287 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2))
        policy_document = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2)
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
            )
    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want to use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
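For context, a hand-filled example of the `SageMakerConfig` the interactive flow above returns (all field values are illustrative):

example_config = SageMakerConfig(
    image_uri=None,
    compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
    distributed_type=SageMakerDistributedType.NO,
    use_cpu=False,
    dynamo_config={},
    ec2_instance_type="ml.p3.2xlarge",
    profile="default",
    region="us-east-1",
    iam_role_name="accelerate_sagemaker_execution_role",
    mixed_precision="no",
    num_machines=1,
    sagemaker_inputs_file=None,
    sagemaker_metrics_file=None,
)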
| 287 |
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    >>> reverse_long_words("nohtyP is nohtyP")
    'Python is Python'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 287 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_lowerCamelCase ="""Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name", default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone", default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options run inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file", default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate", action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version", default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def _a ( ):
lowerCamelCase : int = tpu_command_parser()
lowerCamelCase : int = parser.parse_args()
tpu_command_launcher(lowerCamelCase )
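# Illustrative invocation (hypothetical TPU name and zone), assuming this module
# is registered as the `accelerate tpu-config` subcommand:
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "python train.py" --install_accelerate --debug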
| 287 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
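# The dummy inputs and mask above are tiny fixed tensors used for tracing models
# and for smoke-testing forward passes.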
def check_min_version(min_version):
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                """This example requires a source install from HuggingFace Transformers (see """
                """`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
            )
        else:
            error_message = F'''This example requires a minimum version of {min_version},'''
        error_message += F''' but the version found is {__version__}.\n'''
        raise ImportError(
            error_message
            + """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
            """versions of HuggingFace Transformers.""" )
| 287 | 1 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""")
_lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
_lowerCamelCase ="""pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : str = CamembertTokenizer
_UpperCAmelCase : str = CamembertTokenizerFast
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Dict = True
def UpperCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : Optional[Any] = CamembertTokenizer(__magic_name__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = """<pad>"""
lowerCamelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__magic_name__ ) , 1_0_0_4 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = CamembertTokenizer(__magic_name__ )
tokenizer.save_pretrained(self.tmpdirname )
lowerCamelCase : Dict = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
lowerCamelCase : List[Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.encode(__magic_name__ )
lowerCamelCase : List[Any] = rust_tokenizer.encode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
lowerCamelCase : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
lowerCamelCase : str = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(__magic_name__ )
lowerCamelCase : str = rust_tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
if not self.test_rust_tokenizer:
return
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : List[str] = self.get_rust_tokenizer()
lowerCamelCase : str = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.tokenize(__magic_name__ )
lowerCamelCase : Optional[Any] = rust_tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
lowerCamelCase : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
lowerCamelCase : List[Any] = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : List[Any] = tokenizer.encode(__magic_name__ )
lowerCamelCase : Tuple = rust_tokenizer.encode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
@slow
def UpperCamelCase__ ( self ):
# fmt: off
lowerCamelCase : Dict = {"""input_ids""": [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
lowerCamelCase : Union[str, Any] = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__magic_name__ , )
| 287 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = """camembert"""
def __init__( self , __magic_name__=3_0_5_2_2 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ):
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : int = num_attention_heads
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : int = layer_norm_eps
lowerCamelCase : Any = position_embedding_type
lowerCamelCase : Optional[int] = use_cache
lowerCamelCase : Union[str, Any] = classifier_dropout
class A__ ( __SCREAMING_SNAKE_CASE):
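    # ONNX export configuration: declares which input axes are dynamic so the
    # exported graph accepts variable batch/sequence (and choice) dimensions.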
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 287 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCamelCase ={
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
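# Standard lazy-import layout: `_import_structure` lists each submodule's public
# names; optional backends (vision, torch) extend it below only when available.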
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_pix2struct"""] = [
        """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Pix2StructPreTrainedModel""",
        """Pix2StructForConditionalGeneration""",
        """Pix2StructVisionModel""",
        """Pix2StructTextModel""",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 287 |
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0_2_1_7_6_6_3_4E-1_9,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355818,
}
def energy_conversion(from_type, to_type, value):
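    # Every factor in ENERGY_CONVERSION is "joules per unit", so conversion goes
    # value -> joules -> target unit: value * J_per_from / J_per_to.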
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
            F'''Valid values are: {", ".join(ENERGY_CONVERSION )}'''
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
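# Sanity checks that follow directly from the table above:
#   energy_conversion("joule", "kilojoule", 1_0_0_0) == 1.0
#   energy_conversion("watthour", "wattsecond", 1) == 3_6_0_0.0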
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 287 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_UpperCAmelCase : ClassVar[Features] = Features({"""audio""": Audio()})
_UpperCAmelCase : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
_UpperCAmelCase : str = "audio"
_UpperCAmelCase : str = "transcription"
def UpperCamelCase__ ( self , __magic_name__ ):
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
        if not isinstance(features[self.audio_column] , Audio ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
lowerCamelCase : Optional[Any] = copy.deepcopy(self )
lowerCamelCase : List[Any] = self.input_schema.copy()
lowerCamelCase : Tuple = features[self.audio_column]
lowerCamelCase : int = input_schema
return task_template
@property
def UpperCamelCase__ ( self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 287 | 1 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCamelCase =logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 287 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower_vision_model"""
def __init__( self , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=3 , __magic_name__=1_6 , __magic_name__=2_8_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__=True , __magic_name__=False , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : str = num_hidden_layers
lowerCamelCase : Optional[int] = num_channels
lowerCamelCase : List[str] = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Any = initializer_factor
lowerCamelCase : Tuple = layer_norm_eps
lowerCamelCase : Tuple = stop_gradient
lowerCamelCase : Optional[int] = share_layernorm
lowerCamelCase : str = remove_last_layer
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowerCamelCase : str = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """bridgetower_text_model"""
def __init__( self , __magic_name__=5_0_2_6_5 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=1 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_4 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Any = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : Tuple = hidden_act
lowerCamelCase : Optional[int] = initializer_factor
lowerCamelCase : Any = intermediate_size
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Union[str, Any] = type_vocab_size
lowerCamelCase : Optional[int] = layer_norm_eps
lowerCamelCase : Optional[int] = position_embedding_type
lowerCamelCase : List[str] = use_cache
lowerCamelCase : List[str] = pad_token_id
lowerCamelCase : List[str] = bos_token_id
lowerCamelCase : Optional[int] = eos_token_id
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowerCamelCase : Optional[int] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower"""
def __init__( self , __magic_name__=True , __magic_name__="gelu" , __magic_name__=7_6_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__="add" , __magic_name__=1_2 , __magic_name__=6 , __magic_name__=False , __magic_name__=False , __magic_name__=None , __magic_name__=None , **__magic_name__ , ):
# TODO: remove this once the Hub files are updated.
lowerCamelCase : int = kwargs.pop("""text_config_dict""" , __magic_name__ )
lowerCamelCase : str = kwargs.pop("""vision_config_dict""" , __magic_name__ )
super().__init__(**__magic_name__ )
lowerCamelCase : str = share_cross_modal_transformer_layers
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : str = hidden_size
lowerCamelCase : Tuple = initializer_factor
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : int = share_link_tower_layers
lowerCamelCase : List[Any] = link_tower_type
lowerCamelCase : Tuple = num_attention_heads
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : Union[str, Any] = tie_word_embeddings
lowerCamelCase : Tuple = init_layernorm_from_vision_encoder
if text_config is None:
lowerCamelCase : Any = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
lowerCamelCase : int = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
lowerCamelCase : Any = BridgeTowerTextConfig(**__magic_name__ )
lowerCamelCase : Optional[Any] = BridgeTowerVisionConfig(**__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , **__magic_name__ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = copy.deepcopy(self.__dict__ )
lowerCamelCase : int = self.text_config.to_dict()
lowerCamelCase : Dict = self.vision_config.to_dict()
lowerCamelCase : List[str] = self.__class__.model_type
return output
| 287 | 1 |
import math
def solution(n = 100):
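    # Sum-square difference: (1 + 2 + ... + n)**2 minus (1**2 + 2**2 + ... + n**2).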
    sum_of_squares = sum(i * i for i in range(1, n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1, n + 1 ) ), 2 ) )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 287 |
def solution(limit = 100_0000):
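    # Sums Euler's totient phi(n) for 2 <= n <= limit with a prime sieve; this
    # equals the number of reduced proper fractions with denominator <= limit.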
    primes = set(range(3, limit, 2 ) )
    primes.add(2 )
    for p in range(3, limit, 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p, limit + 1, p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 287 | 1 |
import math
def check_partition_perfect(positive_integer):
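    # If positive_integer = x * (x - 1), then sqrt(4 * n + 1) = 2 * x - 1 and the
    # expression below reduces to log2(x), which is integral iff x is a power of 2.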
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution(max_proportion = 1 / 1_2345):
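    # Scan integers, counting candidates k = (i**2 - 1) / 4 that are integral, and
    # return the first k at which the share of "perfect" partitions falls below
    # max_proportion.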
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate )
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 287 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
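# Anagrams share a "signature": the word's letters in sorted order. Grouping the
# word list by signature makes anagram lookup a single dict access.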
def signature(word):
    return "".join(sorted(word ) )
def anagram(my_word):
    return word_by_signature[signature(my_word )]
data = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
| 287 | 1 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowerCamelCase ="""0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=None ):
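    # Fills a tensor of the given shape with random ints in [0, vocab_size), used
    # as fake input_ids by the generation tests below.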
if rng is None:
lowerCamelCase : Tuple = random.Random()
lowerCamelCase : Tuple = 1
for dim in shape:
total_dims *= dim
lowerCamelCase : List[str] = []
for _ in range(lowerCamelCase ):
values.append(rng.randint(0, vocab_size - 1 ) )
    lowerCamelCase : Dict = np.array(lowerCamelCase, dtype=jnp.int32 ).reshape(lowerCamelCase )
return output
def _a ( lowerCamelCase, lowerCamelCase=None ):
lowerCamelCase : Tuple = ids_tensor(lowerCamelCase, vocab_size=2, rng=lowerCamelCase )
# make sure that at least one token is attended to for each batch
lowerCamelCase : str = 1
return attn_mask
@require_flax
class A__ :
_UpperCAmelCase : Dict = None
_UpperCAmelCase : List[str] = ()
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
lowerCamelCase : List[str] = 2
lowerCamelCase : Optional[int] = inputs["""input_ids"""].shape[-1] // 2
lowerCamelCase : Any = inputs["""input_ids"""][:max_batch_size, :sequence_length]
lowerCamelCase : str = jnp.ones_like(__magic_name__ )
lowerCamelCase : str = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
lowerCamelCase : Tuple = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
lowerCamelCase : Dict = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = self._get_input_ids_and_config()
lowerCamelCase : List[Any] = False
lowerCamelCase : List[str] = max_length
lowerCamelCase : str = 0
for model_class in self.all_generative_model_classes:
lowerCamelCase : Optional[int] = model_class(__magic_name__ )
lowerCamelCase : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase : int = getattr(__magic_name__ , __magic_name__ )
lowerCamelCase : List[Any] = pt_model_class(__magic_name__ ).eval()
lowerCamelCase : List[str] = load_flax_weights_in_pytorch_model(__magic_name__ , flax_model.params )
lowerCamelCase : Union[str, Any] = flax_model.generate(__magic_name__ ).sequences
lowerCamelCase : Union[str, Any] = pt_model.generate(torch.tensor(__magic_name__ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowerCamelCase : int = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._get_input_ids_and_config()
lowerCamelCase : Tuple = False
lowerCamelCase : Tuple = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase : List[Any] = model_class(__magic_name__ )
lowerCamelCase : Dict = model.generate(__magic_name__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , __magic_name__ )
lowerCamelCase : str = jit(model.generate )
lowerCamelCase : Any = jit_generate(__magic_name__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = self._get_input_ids_and_config()
lowerCamelCase : int = True
lowerCamelCase : int = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase : int = model_class(__magic_name__ )
lowerCamelCase : List[str] = model.generate(__magic_name__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , __magic_name__ )
lowerCamelCase : List[str] = jit(model.generate )
lowerCamelCase : Tuple = jit_generate(__magic_name__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = self._get_input_ids_and_config()
lowerCamelCase : Any = False
lowerCamelCase : int = max_length
lowerCamelCase : Union[str, Any] = 2
for model_class in self.all_generative_model_classes:
lowerCamelCase : Dict = model_class(__magic_name__ )
lowerCamelCase : Dict = model.generate(__magic_name__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , __magic_name__ )
lowerCamelCase : Any = jit(model.generate )
lowerCamelCase : Union[str, Any] = jit_generate(__magic_name__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = self._get_input_ids_and_config()
lowerCamelCase : Optional[int] = False
lowerCamelCase : Tuple = max_length
lowerCamelCase : List[Any] = 2
lowerCamelCase : Optional[int] = 2
for model_class in self.all_generative_model_classes:
lowerCamelCase : str = model_class(__magic_name__ )
lowerCamelCase : Any = model.generate(__magic_name__ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = self._get_input_ids_and_config()
lowerCamelCase : List[str] = True
lowerCamelCase : Union[str, Any] = max_length
lowerCamelCase : Dict = 0.8
lowerCamelCase : List[Any] = 1_0
lowerCamelCase : Any = 0.3
lowerCamelCase : Dict = 1
lowerCamelCase : List[str] = 8
lowerCamelCase : Tuple = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase : Optional[int] = model_class(__magic_name__ )
lowerCamelCase : Dict = model.generate(__magic_name__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , __magic_name__ )
lowerCamelCase : Dict = jit(model.generate )
lowerCamelCase : List[Any] = jit_generate(__magic_name__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = self._get_input_ids_and_config()
lowerCamelCase : Union[str, Any] = max_length
lowerCamelCase : str = 1
lowerCamelCase : Optional[int] = 8
lowerCamelCase : List[str] = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase : Union[str, Any] = model_class(__magic_name__ )
lowerCamelCase : List[str] = model.generate(__magic_name__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , __magic_name__ )
lowerCamelCase : Dict = jit(model.generate )
lowerCamelCase : Dict = jit_generate(__magic_name__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = self._get_input_ids_and_config()
lowerCamelCase : str = max_length
lowerCamelCase : str = 2
lowerCamelCase : Optional[Any] = 1
lowerCamelCase : List[str] = 8
lowerCamelCase : List[str] = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase : Union[str, Any] = model_class(__magic_name__ )
lowerCamelCase : Optional[int] = model.generate(__magic_name__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , __magic_name__ )
lowerCamelCase : Optional[int] = jit(model.generate )
lowerCamelCase : Optional[Any] = jit_generate(__magic_name__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase : Union[str, Any] = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase : Dict = False
lowerCamelCase : int = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase : List[str] = model_class(__magic_name__ )
lowerCamelCase : Any = model.generate(__magic_name__ , attention_mask=__magic_name__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , __magic_name__ )
lowerCamelCase : List[str] = jit(model.generate )
lowerCamelCase : List[str] = jit_generate(__magic_name__ , attention_mask=__magic_name__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase : Optional[int] = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase : Optional[Any] = True
lowerCamelCase : List[str] = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase : List[str] = model_class(__magic_name__ )
lowerCamelCase : Any = model.generate(__magic_name__ , attention_mask=__magic_name__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , __magic_name__ )
lowerCamelCase : Optional[int] = jit(model.generate )
lowerCamelCase : int = jit_generate(__magic_name__ , attention_mask=__magic_name__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase : Any = 2
lowerCamelCase : Any = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase : Union[str, Any] = model_class(__magic_name__ )
lowerCamelCase : Any = model.generate(__magic_name__ , attention_mask=__magic_name__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , __magic_name__ )
lowerCamelCase : str = jit(model.generate )
lowerCamelCase : int = jit_generate(__magic_name__ , attention_mask=__magic_name__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
lowerCamelCase : List[Any] = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
lowerCamelCase : Tuple = """Hello world"""
lowerCamelCase : Dict = tokenizer(__magic_name__ , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__magic_name__ , """do_samples""" ):
model.generate(__magic_name__ , do_samples=__magic_name__ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__magic_name__ , """foo""" ):
lowerCamelCase : Any = {"""foo""": """bar"""}
model.generate(__magic_name__ , **__magic_name__ )
| 287 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""pixel_values"""]
def __init__( self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = PILImageResampling.BILINEAR , __magic_name__ = True , __magic_name__ = 1 / 2_5_5 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = size if size is not None else {"""shortest_edge""": 3_8_4}
lowerCamelCase : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Dict = do_resize
lowerCamelCase : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCamelCase : Union[str, Any] = resample
lowerCamelCase : str = do_rescale
lowerCamelCase : Union[str, Any] = rescale_factor
lowerCamelCase : Tuple = do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowerCamelCase : str = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase : List[str] = int(shortest_edge / crop_pct )
lowerCamelCase : Optional[Any] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Optional[int] = resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__magic_name__ , size=(shortest_edge, shortest_edge) , data_format=__magic_name__ , **__magic_name__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__magic_name__ , size=(shortest_edge, shortest_edge) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Dict = size if size is not None else self.size
lowerCamelCase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : List[str] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : Optional[Any] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase : List[Any] = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase : Union[str, Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase : List[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase : Optional[int] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase : List[str] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 287 | 1 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_lowerCamelCase ={
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
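# Number of output labels per GLUE task ("sts-b" is a regression task, hence 1);
# used below to size the classification head when converting a checkpoint.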
logging.set_verbosity_info()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None ):
# Initialise PyTorch model
lowerCamelCase : str = XLNetConfig.from_json_file(lowerCamelCase )
lowerCamelCase : str = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
lowerCamelCase : List[str] = finetuning_task
lowerCamelCase : List[Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowerCamelCase : Union[str, Any] = XLNetForSequenceClassification(lowerCamelCase )
elif "squad" in finetuning_task:
lowerCamelCase : Union[str, Any] = finetuning_task
lowerCamelCase : List[Any] = XLNetForQuestionAnswering(lowerCamelCase )
else:
lowerCamelCase : List[str] = XLNetLMHeadModel(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Save pytorch-model
lowerCamelCase : List[Any] = os.path.join(lowerCamelCase, lowerCamelCase )
lowerCamelCase : Optional[int] = os.path.join(lowerCamelCase, lowerCamelCase )
print(F'''Save PyTorch model to {os.path.abspath(lowerCamelCase )}''' )
torch.save(model.state_dict(), lowerCamelCase )
print(F'''Save configuration file to {os.path.abspath(lowerCamelCase )}''' )
with open(lowerCamelCase, """w""", encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
_lowerCamelCase =parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 287 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCamelCase ={
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
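# Every value above deliberately differs from the PretrainedConfig default so the
# tests below can detect any key that silently reverts to its default.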
@is_staging_test
class A__ ( unittest.TestCase):
@classmethod
def UpperCamelCase__ ( cls ):
lowerCamelCase : int = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
lowerCamelCase : Any = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__magic_name__ , repo_id="""test-config""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
lowerCamelCase : Optional[int] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__magic_name__ , repo_id="""valid_org/test-config-org""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : List[str] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
CustomConfig.register_for_auto_class()
lowerCamelCase : Optional[Any] = CustomConfig(attribute=4_2 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
lowerCamelCase : List[str] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 4_2 )
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase : Optional[int] = c.n_embd + 1 # int
lowerCamelCase : Optional[int] = c.resid_pdrop + 1.0 # float
lowerCamelCase : Tuple = not c.scale_attn_weights # bool
lowerCamelCase : Any = c.summary_type + """foo""" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__magic_name__ , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(__magic_name__ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(__magic_name__ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(__magic_name__ , c.summary_type , """mismatch for key: summary_type""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = PretrainedConfig()
lowerCamelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__magic_name__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
lowerCamelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(__magic_name__ , __magic_name__ )]
if len(__magic_name__ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
F''' {", ".join(__magic_name__ )}.''' )
def UpperCamelCase__ ( self ):
with self.assertRaises(__magic_name__ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
        config = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
        self.assertIsNotNone(config )
def UpperCamelCase__ ( self ):
        # A mock response for an HTTP head request to emulate the server being down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
lowerCamelCase : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=response_mock ) as mock_head:
            lowerCamelCase : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ):
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase : List[str] = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCamelCase__ ( self ):
        configuration = AutoConfig.from_pretrained("""bert-base-cased""" )
        configuration.configuration_files = ["""config.4.0.0.json"""]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , """config.4.0.0.json""" ) , """w""" ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["""config.42.0.0.json"""]
            configuration.hidden_size = 7_6_8
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir , """config.4.0.0.json""" ) , os.path.join(tmp_dir , """config.42.0.0.json""" ) )
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCamelCase__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = """hf-internal-testing/test-two-configs"""
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = """v4.0.0"""
        new_configuration , kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo , return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = """v3.0.0"""
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
        self.assertEqual(old_configuration.hidden_size , 7_6_8 )
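# A minimal, self-contained sketch (not the actual transformers implementation) of the
# `update_from_string` behaviour exercised in the test above: each `key=value` pair is
# cast to the type of the attribute it replaces before being set. Names are illustrative.
def _update_from_string_sketch(obj, update_str):
    pairs = dict(pair.split("=") for pair in update_str.split(","))
    for key, value in pairs.items():
        old = getattr(obj, key)
        if isinstance(old, bool):  # bool must be checked before int (bool subclasses int)
            setattr(obj, key, value.lower() in ("true", "1", "y", "yes"))
        elif isinstance(old, int):
            setattr(obj, key, int(value))
        elif isinstance(old, float):
            setattr(obj, key, float(value))
        else:
            setattr(obj, key, value)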
| 287 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowerCamelCase =logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        """no_inference""",
        """no_cuda""",
        """no_tpu""",
        """no_speed""",
        """no_memory""",
        """no_env_print""",
        """no_multi_process""",
    ]
    def __init__( self , **kwargs ):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    F'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
                    F''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name = kwargs.pop("""tpu_name""" , self.tpu_name )
        self.device_idx = kwargs.pop("""device_idx""" , self.device_idx )
        self.eager_mode = kwargs.pop("""eager_mode""" , self.eager_mode )
        self.use_xla = kwargs.pop("""use_xla""" , self.use_xla )
        super().__init__(**kwargs )
    tpu_name: str = field(
        default=None , metadata={"""help""": """Name of TPU"""} , )
    device_idx: int = field(
        default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
    eager_mode: bool = field(default=False , metadata={"""help""": """Benchmark models in eager mode."""})
    use_xla: bool = field(
        default=False , metadata={
            """help""": """Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."""
        } , )
    @cached_property
    def _setup_tpu( self ):
        requires_backends(self , ["""tf"""] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy( self ):
        requires_backends(self , ["""tf"""] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
                strategy = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] , """GPU""" )  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
        return strategy
    @property
    def is_tpu( self ):
        requires_backends(self , ["""tf"""] )
        return self._setup_tpu is not None
    @property
    def strategy( self ):
        requires_backends(self , ["""tf"""] )
        return self._setup_strategy
    @property
    def gpu_list( self ):
        requires_backends(self , ["""tf"""] )
        return tf.config.list_physical_devices("""GPU""" )
    @property
    def n_gpu( self ):
        requires_backends(self , ["""tf"""] )
        if self.cuda:
            return len(self.gpu_list )
        return 0
    @property
    def is_gpu( self ):
        return self.n_gpu > 0
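# Hedged sketch of the `cached_property` behaviour relied on above (on Python >= 3.8
# one would simply use functools.cached_property): compute the value once, then store
# it in the instance dict so later lookups bypass this non-data descriptor entirely.
import functools
class _CachedPropertySketch:
    def __init__(self, func):
        self.func = func
        functools.update_wrapper(self, func)
    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        value = self.func(obj)
        obj.__dict__[self.func.__name__] = value  # instance dict now shadows the descriptor
        return value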
| 287 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
        model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        input_ids = tf.convert_to_tensor(
            [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.int32 , )  # "J'aime le camembert !"
        output = model(input_ids )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 1_0, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
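# Minimal sketch of the expected-slice regression pattern used above: pin a tiny,
# numerically stable sub-tensor of the output instead of the full tensor. The helper
# name and the fixed [:, :3, :3] slice are illustrative, not part of the test suite.
def _check_output_slice(output, expected_slice, atol=1e-4):
    import numpy as np
    actual = np.asarray(output)[:, :3, :3]
    return bool(np.allclose(actual, np.asarray(expected_slice), atol=atol))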
| 287 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node:
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root: Node | None) -> list[int]:
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root: Node | None) -> int:
    return (max(height(root.left ), height(root.right ) ) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left, level - 1 )
            populate_output(root.right, level - 1 )
    populate_output(root, level )
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right, level - 1 )
            populate_output(root.left, level - 1 )
    populate_output(root, level )
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1, height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h ) )
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(F'''In-order Traversal: {inorder(tree )}''' )
    print(F'''Pre-order Traversal: {preorder(tree )}''' )
    print(F'''Post-order Traversal: {postorder(tree )}''', """\n""" )
    print(F'''Height of Tree: {height(tree )}''', """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(tree ), """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1, height(tree ) + 1 ):
        print(F'''Level {level}:''', get_nodes_from_left_to_right(tree, level=level ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(tree ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
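# Alternative sketch (added for illustration): compute `height` iteratively with the
# same BFS queue idea as `level_order` above, avoiding Python's recursion limit on
# very deep, degenerate trees.
def height_iterative(root: Node | None) -> int:
    if root is None:
        return 0
    depth = 0
    queue = deque([root])
    while queue:
        depth += 1
        for _ in range(len(queue)):  # consume exactly one level per pass
            node = queue.popleft()
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
    return depth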
| 287 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_lowerCamelCase =get_logger(__name__)
class A__ :
def __init__( self , __magic_name__ = None ):
lowerCamelCase : Dict = (
os.path.join(__magic_name__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowerCamelCase : List[str] = Extractor
def UpperCamelCase__ ( self , __magic_name__ ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
lowerCamelCase : int = os.path.abspath(__magic_name__ )
return os.path.join(self.extract_dir , hash_url_to_filename(__magic_name__ ) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
return force_extract or (
not os.path.isfile(__magic_name__ ) and not (os.path.isdir(__magic_name__ ) and os.listdir(__magic_name__ ))
)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = False ):
lowerCamelCase : Union[str, Any] = self.extractor.infer_extractor_format(__magic_name__ )
if not extractor_format:
return input_path
lowerCamelCase : int = self._get_output_path(__magic_name__ )
if self._do_extract(__magic_name__ , __magic_name__ ):
self.extractor.extract(__magic_name__ , __magic_name__ , __magic_name__ )
return output_path
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
@abstractmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
...
@staticmethod
@abstractmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
...
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[bytes] = []
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with open(__magic_name__ , """rb""" ) as f:
return f.read(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if not magic_number:
lowerCamelCase : Optional[Any] = max(len(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
try:
lowerCamelCase : Tuple = cls.read_magic_number(__magic_name__ , __magic_name__ )
except OSError:
return False
return any(magic_number.startswith(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
return tarfile.is_tarfile(__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
def resolved(__magic_name__ ) -> str:
return os.path.realpath(os.path.abspath(__magic_name__ ) )
def badpath(__magic_name__ , __magic_name__ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__magic_name__ , __magic_name__ ) ).startswith(__magic_name__ )
def badlink(__magic_name__ , __magic_name__ ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCamelCase : List[str] = resolved(os.path.join(__magic_name__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__magic_name__ )
lowerCamelCase : Optional[Any] = resolved(__magic_name__ )
for finfo in members:
if badpath(finfo.name , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Dict = tarfile.open(__magic_name__ )
tar_file.extractall(__magic_name__ , members=TarExtractor.safemembers(__magic_name__ , __magic_name__ ) )
tar_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = [B"""\x1F\x8B"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with gzip.open(__magic_name__ , """rb""" ) as gzip_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if super().is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return True
try:
            # Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__magic_name__ , """rb""" ) as fp:
lowerCamelCase : List[str] = _EndRecData(__magic_name__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowerCamelCase : List[Any] = fp.read(__magic_name__ ) # CD is where we expect it to be
if len(__magic_name__ ) == sizeCentralDir:
lowerCamelCase : str = struct.unpack(__magic_name__ , __magic_name__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with zipfile.ZipFile(__magic_name__ , """r""" ) as zip_file:
zip_file.extractall(__magic_name__ )
zip_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[str] = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with lzma.open(__magic_name__ ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Union[str, Any] = rarfile.RarFile(__magic_name__ )
rf.extractall(__magic_name__ )
rf.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
lowerCamelCase : int = zstd.ZstdDecompressor()
with open(__magic_name__ , """rb""" ) as ifh, open(__magic_name__ , """wb""" ) as ofh:
dctx.copy_stream(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
        with bz2.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
        import py7zr
        os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
        with py7zr.SevenZipFile(__magic_name__ , """r""" ) as archive:
archive.extractall(__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
        import lz4.frame
        with lz4.frame.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ :
    # Check tar and gzip before zip: zip detection can produce false positives, so some archives would otherwise be wrongly detected as zip.
_UpperCAmelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase__ ( cls ):
return max(
len(__magic_name__ )
for extractor in cls.extractors.values()
if issubclass(__magic_name__ , __magic_name__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
try:
return MagicNumberBaseExtractor.read_magic_number(__magic_name__ , magic_number_length=__magic_name__ )
except OSError:
return b""
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = False ):
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = cls.infer_extractor_format(__magic_name__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ ): # <Added version="2.4.0"/>
lowerCamelCase : Dict = cls._get_magic_number_max_length()
lowerCamelCase : Optional[Any] = cls._read_magic_number(__magic_name__ , __magic_name__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return extractor_format
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = "deprecated" , ):
os.makedirs(os.path.dirname(__magic_name__ ) , exist_ok=__magic_name__ )
# Prevent parallel extractions
lowerCamelCase : Tuple = str(Path(__magic_name__ ).with_suffix(""".lock""" ) )
with FileLock(__magic_name__ ):
shutil.rmtree(__magic_name__ , ignore_errors=__magic_name__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__magic_name__ , __magic_name__ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = extractor if extractor != """deprecated""" else extractor_format
else:
lowerCamelCase : Optional[int] = cls.extractors[extractor_format]
return extractor.extract(__magic_name__ , __magic_name__ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=__magic_name__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__magic_name__ ):
return extractor.extract(__magic_name__ , __magic_name__ )
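# Self-contained sketch of the magic-number dispatch implemented above: read the first
# few bytes of a file and compare them against known signatures. The table here is a
# small illustrative subset of the extractors' magic_numbers, not the full registry.
_SIGNATURES = {
    b"\x1f\x8b": "gzip",
    b"PK\x03\x04": "zip",
    b"\xfd7zXZ\x00": "xz",
    b"BZh": "bz2",
    b"\x28\xb5\x2f\xfd": "zstd",
}
def _sniff_format(path):
    with open(path, "rb") as f:
        head = f.read(max(len(sig) for sig in _SIGNATURES))
    for sig, fmt in _SIGNATURES.items():
        if head.startswith(sig):
            return fmt
    return None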
| 287 | 1 |
import qiskit
def quantum_entanglement( qubits: int = 2 ):
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Adding an H gate on qubit 0 (now q0 is in superposition)
    circuit.h(0 )
    for i in range(1 , qubits ):
        # Adding a CX (CNOT) gate
        circuit.cx(i - 1 , i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits ) ) , list(range(classical_bits ) ) )
    # Measuring any one qubit now collapses the other qubits out of their
    # superposition into the same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    return job.result().get_counts(circuit )
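# Hedged helper (not part of qiskit; name and tolerance are illustrative): check that
# measured counts are consistent with a GHZ state, i.e. essentially all shots land on
# the all-zeros or all-ones bitstring. Assumes a non-empty counts dict.
def looks_like_ghz(counts, tolerance=0.05):
    total = sum(counts.values())
    n = len(next(iter(counts)))
    good = counts.get("0" * n, 0) + counts.get("1" * n, 0)
    return good / total >= 1 - tolerance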
if __name__ == "__main__":
    print(f'''Total count for various states is: {quantum_entanglement(3)}''')
| 287 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0_0_0_0_0
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, """dataset.arrow""" ), features, num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=True )
        def tokenize(examples):
            return tokenizer(examples["""text"""] )
        times["""map identity"""] = map(dataset )
        times["""map identity batched"""] = map(dataset, batched=True )
        times["""map no-op batched"""] = map(dataset, function=lambda examples : None, batched=True )
        with dataset.formatted_as(type="""numpy""" ):
            times["""map no-op batched numpy"""] = map(dataset, function=lambda examples : None, batched=True )
        with dataset.formatted_as(type="""pandas""" ):
            times["""map no-op batched pandas"""] = map(dataset, function=lambda examples : None, batched=True )
        with dataset.formatted_as(type="""torch""", columns="""numbers""" ):
            times["""map no-op batched pytorch"""] = map(dataset, function=lambda examples : None, batched=True )
        with dataset.formatted_as(type="""tensorflow""", columns="""numbers""" ):
            times["""map no-op batched tensorflow"""] = map(dataset, function=lambda examples : None, batched=True )
        times["""map fast-tokenizer batched"""] = map(dataset, function=tokenize, batched=True )
        times["""filter"""] = filter(dataset )
        # Activate later when the tokenizer supports batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, """wb""" ) as f:
        f.write(json.dumps(times ).encode("""utf-8""" ) )
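# Hedged sketch of what the `get_duration` decorator imported above plausibly does:
# time the wrapped call and return the elapsed seconds instead of the call's result.
# The name `_get_duration_sketch` is illustrative; the real helper lives in utils.
import functools
import time
def _get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start
    return wrapper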
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 287 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
def __init__( self , __magic_name__ , __magic_name__=2 , __magic_name__=True , __magic_name__=False , __magic_name__=1_0 , __magic_name__=3 , __magic_name__=3_2 * 4 , __magic_name__=3_2 * 6 , __magic_name__=4 , __magic_name__=3_2 , ):
lowerCamelCase : List[str] = parent
lowerCamelCase : str = batch_size
lowerCamelCase : int = is_training
lowerCamelCase : Dict = use_auxiliary_loss
lowerCamelCase : Union[str, Any] = num_queries
lowerCamelCase : Tuple = num_channels
lowerCamelCase : Any = min_size
lowerCamelCase : Optional[int] = max_size
lowerCamelCase : Optional[Any] = num_labels
lowerCamelCase : Tuple = mask_feature_size
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__magic_name__ )
lowerCamelCase : Any = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__magic_name__ )
lowerCamelCase : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__magic_name__ ) > 0.5
).float()
lowerCamelCase : List[Any] = (torch.rand((self.batch_size, self.num_labels) , device=__magic_name__ ) > 0.5).long()
lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCamelCase__ ( self ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = self.prepare_config_and_inputs()
lowerCamelCase : List[str] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : Optional[int] = output.encoder_hidden_states
lowerCamelCase : List[str] = output.pixel_decoder_hidden_states
lowerCamelCase : Dict = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__magic_name__ ) , config.decoder_config.decoder_layers )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False ):
with torch.no_grad():
lowerCamelCase : Optional[int] = MaskFormerModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Tuple = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
lowerCamelCase : Dict = model(__magic_name__ , output_hidden_states=__magic_name__ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
def comm_check_on_output(__magic_name__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCamelCase : str = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
lowerCamelCase : Union[str, Any] = model(__magic_name__ )
comm_check_on_output(__magic_name__ )
lowerCamelCase : Optional[Any] = model(
pixel_values=__magic_name__ , pixel_mask=__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
comm_check_on_output(__magic_name__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
_UpperCAmelCase : str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
_UpperCAmelCase : Optional[Any] = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Any = False
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = MaskFormerModelTester(self )
lowerCamelCase : Tuple = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__magic_name__ )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def UpperCamelCase__ ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Optional[Any] = model_class(__magic_name__ )
lowerCamelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Tuple = [*signature.parameters.keys()]
lowerCamelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowerCamelCase : Tuple = MaskFormerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = (self.model_tester.min_size,) * 2
lowerCamelCase : Dict = {
"""pixel_values""": torch.randn((2, 3, *size) , device=__magic_name__ ),
"""mask_labels""": torch.randn((2, 1_0, *size) , device=__magic_name__ ),
"""class_labels""": torch.zeros(2 , 1_0 , device=__magic_name__ ).long(),
}
lowerCamelCase : Optional[int] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__magic_name__ )
lowerCamelCase : List[Any] = model(**__magic_name__ )
self.assertTrue(outputs.loss is not None )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Union[str, Any] = model_class(__magic_name__ ).to(__magic_name__ )
lowerCamelCase : int = model(**__magic_name__ , output_attentions=__magic_name__ )
self.assertTrue(outputs.attentions is not None )
def UpperCamelCase__ ( self ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowerCamelCase : str = self.all_model_classes[1]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
lowerCamelCase : Optional[int] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
lowerCamelCase : Optional[Any] = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ ).loss
loss.backward()
def UpperCamelCase__ ( self ):
# only MaskFormerForInstanceSegmentation has the loss
lowerCamelCase : Optional[Any] = self.all_model_classes[1]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : List[Any] = True
lowerCamelCase : Optional[Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
lowerCamelCase : int = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
lowerCamelCase : Any = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCamelCase : int = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__magic_name__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
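# Minimal demonstration of the retain_grad pattern exercised in the test above:
# non-leaf (intermediate) tensors only populate .grad if retain_grad() is called
# before backward(). Uses torch, which is imported at the top of this file.
def _retain_grad_demo():
    x = torch.randn(2 , 3 , requires_grad=True )
    h = x * 2  # non-leaf intermediate tensor
    h.retain_grad()  # without this, h.grad would stay None after backward()
    h.sum().backward()
    assert h.grad is not None and x.grad is not None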
_lowerCamelCase =1E-4
def _a ( ):
lowerCamelCase : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class A__ ( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def UpperCamelCase__ ( self ):
lowerCamelCase : str = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__magic_name__ )
lowerCamelCase : List[str] = self.default_image_processor
lowerCamelCase : Tuple = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCamelCase : int = model(**__magic_name__ )
lowerCamelCase : str = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
lowerCamelCase : List[str] = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
lowerCamelCase : List[str] = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__magic_name__ )
.eval()
)
lowerCamelCase : Tuple = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCamelCase : Optional[Any] = model(**__magic_name__ )
# masks_queries_logits
lowerCamelCase : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCamelCase : List[Any] = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
lowerCamelCase : List[Any] = torch.tensor(__magic_name__ ).to(__magic_name__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
# class_queries_logits
lowerCamelCase : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCamelCase : Union[str, Any] = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(__magic_name__ )
.eval()
)
lowerCamelCase : Tuple = self.default_image_processor
lowerCamelCase : Any = prepare_img()
lowerCamelCase : Tuple = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
lowerCamelCase : int = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCamelCase : int = model(**__magic_name__ )
# masks_queries_logits
lowerCamelCase : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCamelCase : Tuple = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
lowerCamelCase : Tuple = torch.tensor(__magic_name__ ).to(__magic_name__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
# class_queries_logits
lowerCamelCase : Optional[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCamelCase : Union[str, Any] = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__magic_name__ )
.eval()
)
lowerCamelCase : Optional[Any] = self.default_image_processor
lowerCamelCase : List[Any] = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.float32 ), np.zeros((3_8_4, 3_8_4) ).astype(np.float32 )] , return_tensors="""pt""" , )
lowerCamelCase : Tuple = inputs["""pixel_values"""].to(__magic_name__ )
lowerCamelCase : Union[str, Any] = [el.to(__magic_name__ ) for el in inputs["""mask_labels"""]]
lowerCamelCase : Any = [el.to(__magic_name__ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
lowerCamelCase : Tuple = model(**__magic_name__ )
self.assertTrue(outputs.loss is not None )
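# Hedged helper illustrating the divisible-by-32 checks asserted above: pad height and
# width up to the next multiple of `size_divisor`, as MaskFormer-style processors do.
# The function name is illustrative, not part of transformers.
def pad_to_multiple(height, width, size_divisor=32):
    pad_h = (size_divisor - height % size_divisor) % size_divisor
    pad_w = (size_divisor - width % size_divisor) % size_divisor
    return height + pad_h, width + pad_w
# e.g. pad_to_multiple(800, 1333) -> (800, 1344); the tests above see (800, 1088)
# because the processor also resizes (640x480 -> 1066x800) before padding.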
| 287 |
def lucas_lehmer_test( p: int ) -> bool:
    if p < 2:
        raise ValueError("""p should not be less than 2!""" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
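    # Usage sketch (added for illustration): scan small odd primes p and keep those
    # whose Mersenne number 2**p - 1 passes the Lucas-Lehmer test above. `is_prime`
    # is a naive hypothetical helper, not part of the original module.
    def is_prime(n: int) -> bool:
        return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))
    mersenne_exponents = [p for p in range(3, 20) if is_prime(p) and lucas_lehmer_test(p)]
    print(mersenne_exponents)  # -> [3, 5, 7, 13, 17, 19]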
| 287 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main( args ):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("""/""" )
    target_model_path = args.target_model_path
    print(F'''Load fine-pruned model from {model_name_or_path}''' )
    model = torch.load(os.path.join(model_name_or_path, """pytorch_model.bin""" ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(F'''Copied layer {name}''' )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(F'''Copied layer {name}''' )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(F'''Copied layer {name}''' )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold )
                pruned_model[name] = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F'''{prefix_}mask_scores''']
                mask = TopKBinarizer.apply(scores, threshold )
                pruned_model[name] = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F'''{prefix_}mask_scores''']
                mask = ThresholdBinarizer.apply(scores, threshold, True )
                pruned_model[name] = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F'''{prefix_}mask_scores''']
                l , r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0 )
                pruned_model[name] = tensor * mask
                print(F'''Pruned layer {name}''' )
            else:
                raise ValueError("""Unknown pruning method""" )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ), F'''bertarized_{os.path.basename(model_name_or_path )}''' )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path, target_model_path )
        print(F'''\nCreated folder {target_model_path}''' )
    torch.save(pruned_model, os.path.join(target_model_path, """pytorch_model.bin""" ) )
    print("""\nPruned model saved! See you later!""" )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
_lowerCamelCase =parser.parse_args()
main(args)
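# Hedged sketch (not the emmental implementation) of a top-k binarizer like the
# TopKBinarizer applied above: keep the highest-scoring fraction `threshold` of
# entries and zero out the rest.
def _topk_mask_sketch(scores, threshold):
    k = max(1, int(threshold * scores.numel()))
    cutoff = torch.topk(scores.flatten(), k).values.min()
    return (scores >= cutoff).to(scores.dtype)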
| 287 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_lowerCamelCase =logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use OwlViTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
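# Generic sketch of the deprecation-shim pattern above (names illustrative): subclass
# the replacement class and emit a warning once at construction time.
def _deprecated_alias(new_cls, old_name):
    class _Shim(new_cls):
        def __init__(self, *args, **kwargs):
            warnings.warn(F'''{old_name} is deprecated; use {new_cls.__name__} instead.''' , FutureWarning )
            super().__init__(*args, **kwargs)
    _Shim.__name__ = old_name
    return _Shim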
| 287 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_gpt_neox_fast"""] = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neox"""] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
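# Stripped-down sketch of the lazy-import pattern used above (illustrative, not the
# real _LazyModule): attributes resolve to their submodule only on first access.
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(F'''module {self.__name__} has no attribute {attr}''')
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)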
| 287 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = """segformer"""
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[3_2, 6_4, 1_6_0, 2_5_6] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=2_5_6 , semantic_loss_ignore_index=2_5_5 , **kwargs , ):
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("""reshape_last_stage""" , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("""1.11""")
    @property
    def inputs( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation( self ):
return 1e-4
@property
    def default_onnx_opset( self ):
return 1_2
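# Illustrative helper (not part of transformers): the spatial size after each SegFormer
# encoder stage, given the per-stage strides configured above. The overall downsampling
# factor with the default strides [4, 2, 2, 2] is 4 * 2 * 2 * 2 = 32.
def stage_resolutions(image_size, strides=(4, 2, 2, 2)):
    sizes, current = [], image_size
    for s in strides:
        current //= s
        sizes.append(current)
    return sizes  # e.g. stage_resolutions(512) -> [128, 64, 32, 16]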
| 287 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
    input_schema: ClassVar[Features] = Features({"""audio""": Audio()})
    label_schema: ClassVar[Features] = Features({"""transcription""": Value("""string""")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features( self , features ):
        if self.audio_column not in features:
            raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["""audio"""] = features[self.audio_column]
        task_template.__dict__["""input_schema"""] = input_schema
        return task_template
@property
    def column_mapping( self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
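# Generic sketch of the frozen-dataclass copy pattern used by align_with_features
# above: deepcopy the instance, then write through __dict__ to bypass the frozen
# __setattr__. The helper name is illustrative, not part of datasets.
import copy as _copy
def _with_updated_field(frozen_obj, field_name, value):
    clone = _copy.deepcopy(frozen_obj)
    clone.__dict__[field_name] = value
    return clone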
| 287 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = """gpt_neo"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
    def __init__( self , vocab_size=5_0_2_5_7 , max_position_embeddings=2_0_4_8 , hidden_size=2_0_4_8 , num_layers=2_4 , attention_types=[[["global", "local"], 1_2]] , num_heads=1_6 , intermediate_size=None , window_size=2_5_6 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.attention_layers)` == `config.num_layers` """
                F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                F'''`config.num_layers = {self.num_layers}`. '''
                """`config.attention_layers` is prepared using `config.attention_types`. """
                """Please verify the value of `config.attention_types` argument.""" )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@staticmethod
def UpperCamelCase__ ( __magic_name__ ):
lowerCamelCase : Optional[int] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
import torch
lowerCamelCase : Any = input.size()
lowerCamelCase : List[Any] = len(lowerCamelCase )
lowerCamelCase : Optional[Any] = shape[dimension]
lowerCamelCase : Optional[int] = torch.arange(0, lowerCamelCase, lowerCamelCase )
lowerCamelCase : Dict = torch.div(sizedim - size, lowerCamelCase, rounding_mode="""floor""" ) + 1
lowerCamelCase : int = torch.arange(lowerCamelCase ) + low_indices[:min_length][:, None]
lowerCamelCase : str = [slice(lowerCamelCase )] * rank
lowerCamelCase : List[str] = indices
lowerCamelCase : Dict = input[s]
lowerCamelCase : Any = list(range(0, rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCamelCase )
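# The function above re-implements `torch.Tensor.unfold` so it can be exported
# to ONNX. Below is a minimal, runnable sketch (assuming PyTorch is installed)
# with descriptive names, checked against the built-in on a small example.
import torch


def unfold_sketch(tensor: torch.Tensor, dimension: int, size: int, step: int) -> torch.Tensor:
    sizedim = tensor.size(dimension)
    # Start index of every window, then the full index grid of each window.
    low_indices = torch.arange(0, sizedim, step)
    num_windows = (sizedim - size) // step + 1
    window_indices = torch.arange(size) + low_indices[:num_windows][:, None]
    index = [slice(None)] * tensor.dim()
    index[dimension] = window_indices
    sliced = tensor[tuple(index)]
    # Move the in-window axis to the end, matching `Tensor.unfold`.
    perm = list(range(tensor.dim() + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)


_x = torch.arange(10).reshape(2, 5)
assert torch.equal(unfold_sketch(_x, 1, 2, 2), _x.unfold(1, 2, 2))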
def _a ( lowerCamelCase, lowerCamelCase ):
import torch
lowerCamelCase : List[Any] = torch.arange(1, lowerCamelCase )
lowerCamelCase : Optional[int] = torch.remainder(lowerCamelCase, lowerCamelCase )
lowerCamelCase : List[Any] = remainders == 0
lowerCamelCase : List[Any] = candidates[divisor_indices]
lowerCamelCase : Optional[Any] = torch.max(lowerCamelCase )
return largest_divisor, torch.div(lowerCamelCase, lowerCamelCase, rounding_mode="""floor""" )
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
lowerCamelCase : str = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
lowerCamelCase : int = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase : Tuple = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase__ ( self ):
return self._config.num_heads
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ):
lowerCamelCase : Optional[int] = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
        # We need to order the inputs in the way they appear in forward()
lowerCamelCase : int = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase , lowerCamelCase : Optional[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase : Optional[int] = seqlen + 2
lowerCamelCase : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase : str = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
lowerCamelCase : Tuple = common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase : str = ordered_inputs["""attention_mask"""].dtype
lowerCamelCase : Any = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase__ ( self ):
return 1_3
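# A minimal sketch (assuming PyTorch) of how the dummy `past_key_values`
# shapes above are built: one (key, value) pair of zero tensors per layer,
# each shaped (batch, num_heads, past_sequence_length, head_dim). The sizes
# below are illustrative defaults, not values read from a real config.
import torch

_batch, _num_heads, _seq_len, _hidden_size, _num_layers = 2, 16, 7, 2048, 24
_past_len = _seq_len + 2  # deliberately different from the input length
_head_dim = _hidden_size // _num_heads
_past_shape = (_batch, _num_heads, _past_len, _head_dim)
_past_key_values = [(torch.zeros(_past_shape), torch.zeros(_past_shape)) for _ in range(_num_layers)]
assert _past_key_values[0][0].shape == (2, 16, 9, 128)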
| 287 | 1 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
_lowerCamelCase ={
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
_lowerCamelCase ={
"""allenai/led-base-16384""": 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def _a ( ):
lowerCamelCase : int = (
list(range(ord("""!""" ), ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ), ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ), ord("""ÿ""" ) + 1 ) )
)
lowerCamelCase : str = bs[:]
lowerCamelCase : int = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase )
cs.append(2**8 + n )
n += 1
lowerCamelCase : Tuple = [chr(lowerCamelCase ) for n in cs]
return dict(zip(lowerCamelCase, lowerCamelCase ) )
def _a ( lowerCamelCase ):
lowerCamelCase : List[Any] = set()
lowerCamelCase : str = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase : int = char
return pairs
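# A clean, runnable restatement of the two helpers above, with descriptive
# names: the GPT-2-style byte-to-unicode table, and adjacent symbol-pair
# extraction (the candidate pairs for a BPE merge).
def _bytes_to_unicode_sketch():
    # Printable bytes map to themselves; every other byte is shifted past 255
    # so that all 256 bytes get a visible, reversible unicode stand-in.
    bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))


def _get_pairs_sketch(word):
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}


assert len(_bytes_to_unicode_sketch()) == 256
assert _get_pairs_sketch(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}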
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
_UpperCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : int = ["""input_ids""", """attention_mask"""]
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__="replace" , __magic_name__="<s>" , __magic_name__="</s>" , __magic_name__="</s>" , __magic_name__="<s>" , __magic_name__="<unk>" , __magic_name__="<pad>" , __magic_name__="<mask>" , __magic_name__=False , **__magic_name__ , ):
lowerCamelCase : str = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else bos_token
lowerCamelCase : int = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token
lowerCamelCase : str = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else sep_token
lowerCamelCase : List[str] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else cls_token
lowerCamelCase : str = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token
lowerCamelCase : int = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
lowerCamelCase : List[str] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
super().__init__(
errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , **__magic_name__ , )
with open(__magic_name__ , encoding="""utf-8""" ) as vocab_handle:
lowerCamelCase : Dict = json.load(__magic_name__ )
lowerCamelCase : List[Any] = {v: k for k, v in self.encoder.items()}
lowerCamelCase : Union[str, Any] = errors # how to handle errors in decoding
lowerCamelCase : Union[str, Any] = bytes_to_unicode()
lowerCamelCase : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__magic_name__ , encoding="""utf-8""" ) as merges_handle:
lowerCamelCase : List[Any] = merges_handle.read().split("""\n""" )[1:-1]
lowerCamelCase : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase : List[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCamelCase : Any = {}
lowerCamelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def UpperCamelCase__ ( self ):
return len(self.encoder )
def UpperCamelCase__ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase__ ( self , __magic_name__ ):
if token in self.cache:
return self.cache[token]
lowerCamelCase : int = tuple(__magic_name__ )
lowerCamelCase : List[str] = get_pairs(__magic_name__ )
if not pairs:
return token
while True:
lowerCamelCase : int = min(__magic_name__ , key=lambda __magic_name__ : self.bpe_ranks.get(__magic_name__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase , lowerCamelCase : Optional[Any] = bigram
lowerCamelCase : Dict = []
lowerCamelCase : Optional[int] = 0
while i < len(__magic_name__ ):
try:
lowerCamelCase : Union[str, Any] = word.index(__magic_name__ , __magic_name__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase : List[str] = j
if word[i] == first and i < len(__magic_name__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase : int = tuple(__magic_name__ )
lowerCamelCase : List[str] = new_word
if len(__magic_name__ ) == 1:
break
else:
lowerCamelCase : Union[str, Any] = get_pairs(__magic_name__ )
lowerCamelCase : List[str] = """ """.join(__magic_name__ )
lowerCamelCase : List[str] = word
return word
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : Tuple = []
for token in re.findall(self.pat , __magic_name__ ):
lowerCamelCase : Union[str, Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__magic_name__ ).split(""" """ ) )
return bpe_tokens
def UpperCamelCase__ ( self , __magic_name__ ):
return self.encoder.get(__magic_name__ , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self , __magic_name__ ):
return self.decoder.get(__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : Any = """""".join(__magic_name__ )
lowerCamelCase : List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
if not os.path.isdir(__magic_name__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : List[str] = os.path.join(
__magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase : Any = os.path.join(
__magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + """\n""" )
lowerCamelCase : Union[str, Any] = 0
with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __magic_name__ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
lowerCamelCase : Tuple = token_index
writer.write(""" """.join(__magic_name__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase : int = [self.cls_token_id]
lowerCamelCase : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
if token_ids_a is None:
return [1] + ([0] * len(__magic_name__ )) + [1]
return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
lowerCamelCase : int = [self.sep_token_id]
lowerCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , **__magic_name__ ):
lowerCamelCase : Optional[int] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__magic_name__ ) > 0 and not text[0].isspace()):
lowerCamelCase : Union[str, Any] = """ """ + text
return (text, kwargs)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = PaddingStrategy.DO_NOT_PAD , __magic_name__ = None , __magic_name__ = None , ):
lowerCamelCase : List[str] = super()._pad(
encoded_inputs=__magic_name__ , max_length=__magic_name__ , padding_strategy=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=__magic_name__ , )
# Load from model defaults
if return_attention_mask is None:
lowerCamelCase : List[Any] = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCamelCase : List[str] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
lowerCamelCase : int = len(encoded_inputs["""global_attention_mask"""] ) != len(__magic_name__ )
if needs_to_be_padded:
lowerCamelCase : Optional[Any] = len(__magic_name__ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCamelCase : Optional[Any] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
lowerCamelCase : Union[str, Any] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
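# A minimal, dependency-free sketch of the `global_attention_mask` padding
# rule implemented in `_pad` above: pad with -1 (meaning "local attention",
# not "do not attend") on whichever side the tokenizer pads. The helper name
# is illustrative, not part of the tokenizer API.
def _pad_global_attention_mask_sketch(mask, target_length, padding_side="right"):
    difference = target_length - len(mask)
    if difference <= 0:
        return mask
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))


assert _pad_global_attention_mask_sketch([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert _pad_global_attention_mask_sketch([1, 0, 0], 5, "left") == [-1, -1, 1, 0, 0]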
| 287 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 1 |
from timeit import timeit
_lowerCamelCase ={
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def _a ( lowerCamelCase ):
lowerCamelCase : Union[str, Any] = 0
lowerCamelCase : int = len(lowerCamelCase ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def _a ( lowerCamelCase ):
lowerCamelCase : Any = len(lowerCamelCase ) // 2
lowerCamelCase : Optional[Any] = len(lowerCamelCase )
    # We only need to traverse to the middle of the string,
    # since the i-th character from the end can be read
    # directly from index n - i - 1.
    # e.g. [0,1,2,3,4,5] => index 4 mirrors index 1
    # (i == n - i - 1), where n is the length of the string.
return all(s[i] == s[n - i - 1] for i in range(lowerCamelCase ) )
def _a ( lowerCamelCase ):
if len(lowerCamelCase ) <= 2:
return True
if s[0] == s[len(lowerCamelCase ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def _a ( lowerCamelCase ):
return s == s[::-1]
def _a ( lowerCamelCase ):
lowerCamelCase : str = F'''all({name}(key) is value for key, value in test_data.items())'''
lowerCamelCase : Optional[Any] = F'''from __main__ import test_data, {name}'''
lowerCamelCase : int = 50_0000
lowerCamelCase : int = timeit(stmt=lowerCamelCase, setup=lowerCamelCase, number=lowerCamelCase )
print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'''{key:21} {value}''')
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 287 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=True ):
model.train()
lowerCamelCase : Dict = model(lowerCamelCase )
lowerCamelCase : Any = F.mse_loss(lowerCamelCase, target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase=False ):
set_seed(42 )
lowerCamelCase : Tuple = RegressionModel()
lowerCamelCase : Any = deepcopy(lowerCamelCase )
lowerCamelCase : Any = RegressionDataset(length=80 )
lowerCamelCase : Dict = DataLoader(lowerCamelCase, batch_size=16 )
model.to(accelerator.device )
if sched:
lowerCamelCase : int = AdamW(params=model.parameters(), lr=1e-3 )
lowerCamelCase : Optional[Any] = AdamW(params=ddp_model.parameters(), lr=1e-3 )
lowerCamelCase : str = LambdaLR(lowerCamelCase, lr_lambda=lambda lowerCamelCase : epoch**0.6_5 )
lowerCamelCase : Tuple = LambdaLR(lowerCamelCase, lr_lambda=lambda lowerCamelCase : epoch**0.6_5 )
    # Prepare the model copies (and, when scheduling, the optimizers/schedulers)
if sched:
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = accelerator.prepare(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
lowerCamelCase , lowerCamelCase : List[Any] = accelerator.prepare(lowerCamelCase, lowerCamelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _a ( lowerCamelCase ):
# Test when on a single CPU or GPU that the context manager does nothing
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_training_setup(lowerCamelCase )
# Use a single batch
lowerCamelCase , lowerCamelCase : Union[str, Any] = next(iter(lowerCamelCase ) ).values()
for iteration in range(3 ):
        # Gather the distributed inputs and targets for the base model
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : int = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground-truth step outside of DDP
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
# Sync grads
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad, ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : List[Any] = ddp_input[torch.randperm(len(lowerCamelCase ) )]
def _a ( lowerCamelCase ):
# Test on distributed setup that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase : int = get_training_setup(lowerCamelCase )
# Use a single batch
lowerCamelCase , lowerCamelCase : Union[str, Any] = next(iter(lowerCamelCase ) ).values()
for iteration in range(3 ):
        # Gather the distributed inputs and targets for the base model
lowerCamelCase , lowerCamelCase : Any = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : str = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground-truth step outside of DDP
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
# Sync grads
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : Optional[Any] = ddp_input[torch.randperm(len(lowerCamelCase ) )]
def _a ( lowerCamelCase=False, lowerCamelCase=False ):
lowerCamelCase : Any = Accelerator(
split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = get_training_setup(lowerCamelCase )
for iteration, batch in enumerate(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = batch.values()
        # Gather the distributed inputs and targets for the base model
lowerCamelCase , lowerCamelCase : str = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground-truth step outside of DDP
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
        # DDP model and model should only be in sync at the end of each accumulation window (or on the final batch)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : Any = ddp_input[torch.randperm(len(lowerCamelCase ) )]
GradientState._reset_state()
def _a ( lowerCamelCase=False, lowerCamelCase=False ):
lowerCamelCase : List[Any] = Accelerator(
split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_training_setup(lowerCamelCase, lowerCamelCase )
for iteration, batch in enumerate(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = batch.values()
        # Gather the distributed inputs and targets for the base model
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground-truth step outside of DDP
model.train()
ddp_model.train()
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowerCamelCase : Union[str, Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def _a ( ):
lowerCamelCase : int = Accelerator()
lowerCamelCase : Optional[Any] = RegressionDataset(length=80 )
lowerCamelCase : List[str] = DataLoader(lowerCamelCase, batch_size=16 )
lowerCamelCase : int = RegressionDataset(length=96 )
lowerCamelCase : Optional[int] = DataLoader(lowerCamelCase, batch_size=16 )
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.prepare(lowerCamelCase, lowerCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase )
if iteration < len(lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase )
if batch_num < len(lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _a ( ):
lowerCamelCase : List[Any] = Accelerator()
lowerCamelCase : int = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(lowerCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(lowerCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation(lowerCamelCase, lowerCamelCase )
    # Currently breaks on torch 2.0+; needs investigation
if is_torch_version("""<""", """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, """`split_batches=False`, `dispatch_batches=False`**""", )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation_with_opt_and_scheduler(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
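# A minimal sketch of the `Accelerator.accumulate` pattern exercised above,
# reusing the imports from this file. With gradient_accumulation_steps=2 the
# optimizer effectively steps once per two batches; batch unpacking mirrors
# how the tests above do it. This is a sketch, not part of the test suite.
def _sketch_accumulate_loop():
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model = RegressionModel()
    optimizer = AdamW(params=model.parameters(), lr=1e-3)
    dataloader = DataLoader(RegressionDataset(length=32), batch_size=8)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch in dataloader:
        inputs, targets = batch.values()
        with accelerator.accumulate(model):
            outputs = model(inputs)
            loss = F.mse_loss(outputs, targets.to(outputs.device))
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()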
| 287 | 1 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
_lowerCamelCase =numpy.array([0, 0])
_lowerCamelCase =numpy.array([0.5, 0.8660254])
_lowerCamelCase =numpy.array([1, 0])
_lowerCamelCase =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[str] = initial_vectors
for _ in range(lowerCamelCase ):
lowerCamelCase : Union[str, Any] = iteration_step(lowerCamelCase )
return vectors
def _a ( lowerCamelCase ):
lowerCamelCase : List[str] = []
for i, start_vector in enumerate(vectors[:-1] ):
lowerCamelCase : List[Any] = vectors[i + 1]
new_vectors.append(lowerCamelCase )
lowerCamelCase : List[Any] = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Union[str, Any] = numpy.radians(lowerCamelCase )
lowerCamelCase , lowerCamelCase : List[str] = numpy.cos(lowerCamelCase ), numpy.sin(lowerCamelCase )
lowerCamelCase : int = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase ):
lowerCamelCase : List[str] = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowerCamelCase , lowerCamelCase : List[str] = zip(*lowerCamelCase )
plt.plot(lowerCamelCase, lowerCamelCase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
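# A quick, runnable sketch (using the numpy import above) of the rotation
# convention in this file: counter-clockwise rotation by `angle` degrees via
# a 2x2 rotation matrix. A 90-degree turn maps the x unit vector onto y.
def _rotate_sketch(vector, angle_in_degrees):
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    return numpy.dot(numpy.array(((c, -s), (s, c))), vector)


assert numpy.allclose(_rotate_sketch(numpy.array([1, 0]), 90), numpy.array([0, 1]))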
| 287 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
if return_pvalue:
lowerCamelCase : Optional[Any] = pearsonr(__magic_name__ , __magic_name__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__magic_name__ , __magic_name__ )[0] )}
| 287 | 1 |
class A__ :
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Dict = name
lowerCamelCase : Optional[int] = value
lowerCamelCase : str = weight
def __repr__( self ):
return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def UpperCamelCase__ ( self ):
return self.value
def UpperCamelCase__ ( self ):
return self.name
def UpperCamelCase__ ( self ):
return self.weight
def UpperCamelCase__ ( self ):
return self.value / self.weight
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[Any] = []
for i in range(len(lowerCamelCase ) ):
menu.append(Things(name[i], value[i], weight[i] ) )
return menu
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : str = sorted(lowerCamelCase, key=lowerCamelCase, reverse=lowerCamelCase )
lowerCamelCase : Tuple = []
lowerCamelCase , lowerCamelCase : List[str] = 0.0, 0.0
for i in range(len(lowerCamelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def _a ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
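# A clean, runnable sketch of the greedy strategy above: sort items by value
# density (value / weight) and take each one that still fits the budget.
# The function name and (name, value, weight) tuple layout are illustrative.
def _greedy_by_density_sketch(items, max_weight):
    chosen, total_value, total_weight = [], 0.0, 0.0
    for name, value, weight in sorted(items, key=lambda it: it[1] / it[2], reverse=True):
        if total_weight + weight <= max_weight:
            chosen.append(name)
            total_weight += weight
            total_value += value
    return chosen, total_value


assert _greedy_by_density_sketch([("a", 60, 10), ("b", 100, 20), ("c", 120, 30)], 50) == (["a", "b"], 160.0)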
| 287 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """conditional_detr"""
_UpperCAmelCase : Optional[int] = ["""past_key_values"""]
_UpperCAmelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __magic_name__=True , __magic_name__=None , __magic_name__=3 , __magic_name__=3_0_0 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=True , __magic_name__="relu" , __magic_name__=2_5_6 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=False , __magic_name__="sine" , __magic_name__="resnet50" , __magic_name__=True , __magic_name__=False , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=1 , __magic_name__=1 , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=0.25 , **__magic_name__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCamelCase : Dict = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase : str = config_class.from_dict(__magic_name__ )
lowerCamelCase : Dict = use_timm_backbone
lowerCamelCase : str = backbone_config
lowerCamelCase : Tuple = num_channels
lowerCamelCase : Dict = num_queries
lowerCamelCase : Any = d_model
lowerCamelCase : Optional[Any] = encoder_ffn_dim
lowerCamelCase : List[str] = encoder_layers
lowerCamelCase : Union[str, Any] = encoder_attention_heads
lowerCamelCase : Any = decoder_ffn_dim
lowerCamelCase : Dict = decoder_layers
lowerCamelCase : Union[str, Any] = decoder_attention_heads
lowerCamelCase : Dict = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Union[str, Any] = activation_dropout
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : int = init_std
lowerCamelCase : str = init_xavier_std
lowerCamelCase : Tuple = encoder_layerdrop
lowerCamelCase : str = decoder_layerdrop
lowerCamelCase : Tuple = encoder_layers
lowerCamelCase : Optional[int] = auxiliary_loss
lowerCamelCase : Optional[Any] = position_embedding_type
lowerCamelCase : Optional[int] = backbone
lowerCamelCase : Union[str, Any] = use_pretrained_backbone
lowerCamelCase : str = dilation
# Hungarian matcher
lowerCamelCase : Optional[Any] = class_cost
lowerCamelCase : Dict = bbox_cost
lowerCamelCase : Tuple = giou_cost
# Loss coefficients
lowerCamelCase : Union[str, Any] = mask_loss_coefficient
lowerCamelCase : Dict = dice_loss_coefficient
lowerCamelCase : Optional[int] = cls_loss_coefficient
lowerCamelCase : Optional[int] = bbox_loss_coefficient
lowerCamelCase : Optional[int] = giou_loss_coefficient
lowerCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
return self.d_model
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCamelCase : Optional[int] = self.backbone_config.to_dict()
lowerCamelCase : Optional[Any] = self.__class__.model_type
return output
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = version.parse("""1.11""")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-5
@property
def UpperCamelCase__ ( self ):
return 1_2
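# A minimal sketch of dummy inputs matching the ONNX axes declared above
# (assuming PyTorch, which this config module does not otherwise import).
# The concrete sizes are illustrative.
import torch

_dummy_pixel_values = torch.zeros(2, 3, 224, 224)  # (batch, num_channels, height, width)
_dummy_pixel_mask = torch.ones(2, 224, 224, dtype=torch.long)  # (batch, height, width)
assert _dummy_pixel_values.shape[0] == _dummy_pixel_mask.shape[0]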
| 287 | 1 |
from __future__ import annotations
import os
from typing import Any
import requests
_lowerCamelCase ="""https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
_lowerCamelCase =BASE_URL + """/user"""
# https://github.com/settings/tokens
_lowerCamelCase =os.environ.get("""USER_TOKEN""", """""")
def _a ( lowerCamelCase ):
lowerCamelCase : List[str] = {
"""Authorization""": F'''token {auth_token}''',
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(lowerCamelCase, headers=lowerCamelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
| 287 |
import json
import sys
def _a ( lowerCamelCase, lowerCamelCase ):
with open(lowerCamelCase, encoding="""utf-8""" ) as f:
lowerCamelCase : List[Any] = json.load(lowerCamelCase )
lowerCamelCase : Optional[Any] = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(lowerCamelCase ):
lowerCamelCase : List[Any] = results[benchmark_name]
lowerCamelCase : Union[str, Any] = benchmark_name.split("""/""" )[-1]
output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
lowerCamelCase : Any = """| metric |"""
lowerCamelCase : str = """|--------|"""
lowerCamelCase : List[Any] = """| new / old (diff) |"""
for metric_name in sorted(lowerCamelCase ):
lowerCamelCase : List[Any] = benchmark_res[metric_name]
lowerCamelCase : Tuple = metric_vals["""new"""]
lowerCamelCase : int = metric_vals.get("""old""", lowerCamelCase )
lowerCamelCase : Dict = metric_vals.get("""diff""", lowerCamelCase )
lowerCamelCase : Dict = F''' {new_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else """None"""
if old_val is not None:
val_str += F''' / {old_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else "None"
if dif_val is not None:
val_str += F''' ({dif_val:f})''' if isinstance(lowerCamelCase, (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(lowerCamelCase, """w""", encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(lowerCamelCase ) )
if __name__ == "__main__":
_lowerCamelCase =sys.argv[1]
_lowerCamelCase =sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
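# A tiny, runnable sketch of the "new / old (diff)" cell format produced by
# the loop above, with illustrative numbers; the helper name is hypothetical.
def _format_metric_cell(new_val, old_val, dif_val):
    cell = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
    if old_val is not None:
        cell += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
    if dif_val is not None:
        cell += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
    return cell


assert _format_metric_cell(1.5, 1.0, 0.5) == " 1.500000 / 1.000000 (0.500000)"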
| 287 | 1 |
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
def update_area_of_max_square(lowerCamelCase, lowerCamelCase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowerCamelCase : List[Any] = update_area_of_max_square(lowerCamelCase, col + 1 )
lowerCamelCase : Dict = update_area_of_max_square(row + 1, col + 1 )
lowerCamelCase : List[Any] = update_area_of_max_square(row + 1, lowerCamelCase )
if mat[row][col]:
lowerCamelCase : Dict = 1 + min([right, diagonal, down] )
lowerCamelCase : Tuple = max(largest_square_area[0], lowerCamelCase )
return sub_problem_sol
else:
return 0
lowerCamelCase : Optional[Any] = [0]
update_area_of_max_square(0, 0 )
return largest_square_area[0]
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
def update_area_of_max_square_using_dp_array(
lowerCamelCase, lowerCamelCase, lowerCamelCase ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowerCamelCase : str = update_area_of_max_square_using_dp_array(lowerCamelCase, col + 1, lowerCamelCase )
lowerCamelCase : Any = update_area_of_max_square_using_dp_array(row + 1, col + 1, lowerCamelCase )
lowerCamelCase : Optional[Any] = update_area_of_max_square_using_dp_array(row + 1, lowerCamelCase, lowerCamelCase )
if mat[row][col]:
lowerCamelCase : Dict = 1 + min([right, diagonal, down] )
lowerCamelCase : Tuple = max(largest_square_area[0], lowerCamelCase )
lowerCamelCase : Any = sub_problem_sol
return sub_problem_sol
else:
return 0
lowerCamelCase : Dict = [0]
lowerCamelCase : Tuple = [[-1] * cols for _ in range(lowerCamelCase )]
update_area_of_max_square_using_dp_array(0, 0, lowerCamelCase )
return largest_square_area[0]
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Tuple = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowerCamelCase : Any = 0
for row in range(rows - 1, -1, -1 ):
for col in range(cols - 1, -1, -1 ):
lowerCamelCase : Tuple = dp_array[row][col + 1]
lowerCamelCase : Optional[int] = dp_array[row + 1][col + 1]
lowerCamelCase : str = dp_array[row + 1][col]
if mat[row][col] == 1:
lowerCamelCase : Tuple = 1 + min(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowerCamelCase : Dict = max(dp_array[row][col], lowerCamelCase )
else:
lowerCamelCase : str = 0
return largest_square_area
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Union[str, Any] = [0] * (cols + 1)
lowerCamelCase : Optional[int] = [0] * (cols + 1)
lowerCamelCase : Any = 0
for row in range(rows - 1, -1, -1 ):
for col in range(cols - 1, -1, -1 ):
lowerCamelCase : Optional[int] = current_row[col + 1]
lowerCamelCase : str = next_row[col + 1]
lowerCamelCase : Dict = next_row[col]
if mat[row][col] == 1:
lowerCamelCase : List[str] = 1 + min(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowerCamelCase : List[str] = max(current_row[col], lowerCamelCase )
else:
lowerCamelCase : Any = 0
lowerCamelCase : str = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
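# A clean, runnable restatement of the bottom-up recurrence above:
# dp[r][c] = 1 + min(right, diagonal, down) wherever mat[r][c] == 1.
# It returns the side length of the largest all-ones square (square the
# result for the area); the name is descriptive, not from the original file.
def _largest_square_side_sketch(mat):
    rows, cols = len(mat), len(mat[0])
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    best_side = 0
    for r in range(rows - 1, -1, -1):
        for c in range(cols - 1, -1, -1):
            if mat[r][c] == 1:
                dp[r][c] = 1 + min(dp[r][c + 1], dp[r + 1][c + 1], dp[r + 1][c])
                best_side = max(best_side, dp[r][c])
    return best_side


assert _largest_square_side_sketch([[1, 1], [1, 1]]) == 2
assert _largest_square_side_sketch([[1, 0], [1, 1]]) == 1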
| 287 |
def _a ( lowerCamelCase ):
return " ".join(
"""""".join(word[::-1] ) if len(lowerCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 287 | 1 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
def get_masked_lm_array(lowerCamelCase ):
lowerCamelCase : Optional[int] = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowerCamelCase : Union[str, Any] = tf.train.load_variable(lowerCamelCase, lowerCamelCase )
if "kernel" in name:
lowerCamelCase : int = array.transpose()
return torch.from_numpy(lowerCamelCase )
def get_encoder_array(lowerCamelCase ):
lowerCamelCase : Optional[int] = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowerCamelCase : Optional[int] = tf.train.load_variable(lowerCamelCase, lowerCamelCase )
if "kernel" in name:
lowerCamelCase : List[str] = array.transpose()
return torch.from_numpy(lowerCamelCase )
def get_encoder_layer_array(lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Union[str, Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowerCamelCase : Tuple = tf.train.load_variable(lowerCamelCase, lowerCamelCase )
if "kernel" in name:
lowerCamelCase : Dict = array.transpose()
return torch.from_numpy(lowerCamelCase )
def get_encoder_attention_layer_array(lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Optional[int] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
lowerCamelCase : str = tf.train.load_variable(lowerCamelCase, lowerCamelCase )
lowerCamelCase : List[str] = array.reshape(lowerCamelCase )
if "kernel" in name:
lowerCamelCase : Optional[Any] = array.transpose()
return torch.from_numpy(lowerCamelCase )
print(F'''Loading model based on config from {config_path}...''' )
lowerCamelCase : Tuple = BertConfig.from_json_file(lowerCamelCase )
lowerCamelCase : Optional[Any] = BertForMaskedLM(lowerCamelCase )
# Layers
for layer_index in range(0, config.num_hidden_layers ):
lowerCamelCase : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
lowerCamelCase : BertSelfAttention = layer.attention.self
lowerCamelCase : Dict = get_encoder_attention_layer_array(
lowerCamelCase, """_query_dense/kernel""", self_attn.query.weight.data.shape )
lowerCamelCase : Any = get_encoder_attention_layer_array(
lowerCamelCase, """_query_dense/bias""", self_attn.query.bias.data.shape )
lowerCamelCase : Tuple = get_encoder_attention_layer_array(
lowerCamelCase, """_key_dense/kernel""", self_attn.key.weight.data.shape )
lowerCamelCase : List[str] = get_encoder_attention_layer_array(
lowerCamelCase, """_key_dense/bias""", self_attn.key.bias.data.shape )
lowerCamelCase : Tuple = get_encoder_attention_layer_array(
lowerCamelCase, """_value_dense/kernel""", self_attn.value.weight.data.shape )
lowerCamelCase : Optional[int] = get_encoder_attention_layer_array(
lowerCamelCase, """_value_dense/bias""", self_attn.value.bias.data.shape )
# Self-attention Output
lowerCamelCase : BertSelfOutput = layer.attention.output
lowerCamelCase : List[Any] = get_encoder_attention_layer_array(
lowerCamelCase, """_output_dense/kernel""", self_output.dense.weight.data.shape )
lowerCamelCase : int = get_encoder_attention_layer_array(
lowerCamelCase, """_output_dense/bias""", self_output.dense.bias.data.shape )
lowerCamelCase : Dict = get_encoder_layer_array(lowerCamelCase, """_attention_layer_norm/gamma""" )
lowerCamelCase : int = get_encoder_layer_array(lowerCamelCase, """_attention_layer_norm/beta""" )
# Intermediate
lowerCamelCase : BertIntermediate = layer.intermediate
lowerCamelCase : Optional[int] = get_encoder_layer_array(lowerCamelCase, """_intermediate_dense/kernel""" )
lowerCamelCase : Union[str, Any] = get_encoder_layer_array(lowerCamelCase, """_intermediate_dense/bias""" )
# Output
lowerCamelCase : BertOutput = layer.output
lowerCamelCase : Tuple = get_encoder_layer_array(lowerCamelCase, """_output_dense/kernel""" )
lowerCamelCase : List[str] = get_encoder_layer_array(lowerCamelCase, """_output_dense/bias""" )
lowerCamelCase : Dict = get_encoder_layer_array(lowerCamelCase, """_output_layer_norm/gamma""" )
lowerCamelCase : List[str] = get_encoder_layer_array(lowerCamelCase, """_output_layer_norm/beta""" )
# Embeddings
lowerCamelCase : Any = get_encoder_array("""_position_embedding_layer/embeddings""" )
lowerCamelCase : Dict = get_encoder_array("""_type_embedding_layer/embeddings""" )
lowerCamelCase : Union[str, Any] = get_encoder_array("""_embedding_norm_layer/gamma""" )
lowerCamelCase : Union[str, Any] = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
lowerCamelCase : Any = model.cls.predictions.transform
lowerCamelCase : Optional[Any] = get_masked_lm_array("""dense/kernel""" )
lowerCamelCase : Optional[int] = get_masked_lm_array("""dense/bias""" )
lowerCamelCase : Dict = get_masked_lm_array("""layer_norm/gamma""" )
lowerCamelCase : Optional[int] = get_masked_lm_array("""layer_norm/beta""" )
lowerCamelCase : List[str] = get_masked_lm_array("""embedding_table""" )
# Pooling
lowerCamelCase : int = BertPooler(config=lowerCamelCase )
lowerCamelCase : BertPooler = get_encoder_array("""_pooler_layer/kernel""" )
lowerCamelCase : BertPooler = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(lowerCamelCase )
# Integration test - should load without any errors ;)
lowerCamelCase : List[Any] = BertForMaskedLM.from_pretrained(lowerCamelCase )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
_lowerCamelCase =parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
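# A hypothetical invocation of the conversion script above (the script name and
# all three paths are placeholders; the flags match the argparse definitions):
#   python convert_token_dropping_checkpoint.py \
#       --tf_checkpoint_path ./tf_ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_dump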
| 287 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
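# A note on the three constants above: they appear to mirror transformers'
# MULTIPLE_CHOICE_DUMMY_INPUTS, DUMMY_INPUTS and DUMMY_MASK -- tiny token-id and
# attention-mask batches used for model tracing and smoke tests.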
def _a ( lowerCamelCase ):
if version.parse(__version__ ) < version.parse(lowerCamelCase ):
if "dev" in min_version:
lowerCamelCase : Optional[int] = (
"""This example requires a source install from HuggingFace Transformers (see """
"""`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
)
else:
lowerCamelCase : int = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
"""versions of HuggingFace Transformers.""" )
| 287 | 1 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class A__ :
@staticmethod
def UpperCamelCase__ ( *__magic_name__ , **__magic_name__ ):
pass
def _a ( lowerCamelCase ):
lowerCamelCase : Union[str, Any] = hashlib.md5(image.tobytes() )
return m.hexdigest()
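# hashimage above returns the hex MD5 digest of the raw pixel bytes: a compact,
# deterministic fingerprint used further down to compare a pipeline's depth map
# against a stored reference (the exact-digest assertion there is commented out
# as flaky).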
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class A__ ( unittest.TestCase):
_UpperCAmelCase : Any = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : int = DepthEstimationPipeline(model=__magic_name__ , image_processor=__magic_name__ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : str = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , __magic_name__ )
import datasets
lowerCamelCase : int = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
lowerCamelCase : Optional[Any] = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , __magic_name__ , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def UpperCamelCase__ ( self ):
pass
@slow
@require_torch
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """Intel/dpt-large"""
lowerCamelCase : Dict = pipeline("""depth-estimation""" , model=__magic_name__ )
lowerCamelCase : Tuple = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
lowerCamelCase : Optional[Any] = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def UpperCamelCase__ ( self ):
# This is highly irregular to have no small tests.
self.skipTest("""There is no hf-internal-testing tiny model for either GLPN or DPT""" )
| 287 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = """camembert"""
def __init__( self , __magic_name__=3_0_5_2_2 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ):
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : int = num_attention_heads
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : int = layer_norm_eps
lowerCamelCase : Any = position_embedding_type
lowerCamelCase : Optional[int] = use_cache
lowerCamelCase : Union[str, Any] = classifier_dropout
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
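# For the default (non multiple-choice) case the property above resolves to:
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])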
| 287 | 1 |
import json
import sys
def _a ( lowerCamelCase, lowerCamelCase ):
with open(lowerCamelCase, encoding="""utf-8""" ) as f:
lowerCamelCase : List[Any] = json.load(lowerCamelCase )
lowerCamelCase : Optional[Any] = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(lowerCamelCase ):
lowerCamelCase : List[Any] = results[benchmark_name]
lowerCamelCase : Union[str, Any] = benchmark_name.split("""/""" )[-1]
output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
lowerCamelCase : Any = """| metric |"""
lowerCamelCase : str = """|--------|"""
lowerCamelCase : List[Any] = """| new / old (diff) |"""
for metric_name in sorted(lowerCamelCase ):
lowerCamelCase : List[Any] = benchmark_res[metric_name]
lowerCamelCase : Tuple = metric_vals["""new"""]
lowerCamelCase : int = metric_vals.get("""old""", lowerCamelCase )
lowerCamelCase : Dict = metric_vals.get("""diff""", lowerCamelCase )
lowerCamelCase : Dict = F''' {new_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else """None"""
if old_val is not None:
val_str += F''' / {old_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else "None"
if dif_val is not None:
val_str += F''' ({dif_val:f})''' if isinstance(lowerCamelCase, (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(lowerCamelCase, """w""", encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(lowerCamelCase ) )
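# Sketch of the markdown emitted for one benchmark with a single hypothetical
# metric named load_time, where new=0.423, old=0.401, diff=0.022:
#   | metric | load_time |
#   |--------|---|
#   | new / old (diff) | 0.423000 / 0.401000 (0.022000) |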
if __name__ == "__main__":
_lowerCamelCase =sys.argv[1]
_lowerCamelCase =sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 287 |
_lowerCamelCase ={
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0_2_1_7_6_6_3_4E-1_9,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355818,
}
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
lowerCamelCase : Dict = (
F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
F'''Valid values are: {", ".join(lowerCamelCase )}'''
)
raise ValueError(lowerCamelCase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
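# Worked examples, following the conversion table above and assuming the
# (from_type, to_type, value) argument order implied by the reads in the body:
#   _a("joule", "kilojoule", 1_000) -> 1.0
#   _a("kilowatthour", "joule", 1) -> 3_600_000.0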
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287 | 1 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class A__ :
def __init__( self , __magic_name__ , __magic_name__=1_0_0 , __magic_name__=1_3 , __magic_name__=3_0 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=3_2 , __magic_name__=4 , __magic_name__=4 , __magic_name__=3_7 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1_0 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=None , __magic_name__=[0, 1, 2, 3] , ):
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Union[str, Any] = 1_0_0
lowerCamelCase : int = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : List[str] = patch_size
lowerCamelCase : Union[str, Any] = num_channels
lowerCamelCase : Optional[int] = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[Any] = hidden_size
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : Union[str, Any] = intermediate_size
lowerCamelCase : List[Any] = hidden_act
lowerCamelCase : Optional[Any] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : str = type_sequence_label_size
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Tuple = scope
lowerCamelCase : Any = out_indices
lowerCamelCase : Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase : int = (image_size // patch_size) ** 2
lowerCamelCase : List[str] = num_patches + 1
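# e.g. with the defaults above (image_size=30, patch_size=2):
# (30 // 2) ** 2 = 225 patches, so seq_length = 226 once [CLS] is counted.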
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Optional[Any] = None
lowerCamelCase : List[Any] = None
if self.use_labels:
lowerCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase__ ( self ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : List[Any] = BeitModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Union[str, Any] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : str = BeitForMaskedImageModeling(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Optional[int] = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = self.type_sequence_label_size
lowerCamelCase : Tuple = BeitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Any = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : List[str] = 1
lowerCamelCase : Optional[Any] = BeitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : List[Any] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = self.num_labels
lowerCamelCase : List[str] = BeitForSemanticSegmentation(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : int = model(__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : int = config_and_inputs
lowerCamelCase : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Optional[int] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCAmelCase : Union[str, Any] = (
{
"""feature-extraction""": BeitModel,
"""image-classification""": BeitForImageClassification,
"""image-segmentation""": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : int = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Optional[int] = False
def UpperCamelCase__ ( self ):
lowerCamelCase : str = BeitModelTester(self )
lowerCamelCase : int = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=3_7 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Optional[Any] = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(__magic_name__ )
lowerCamelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : List[Any] = [*signature.parameters.keys()]
lowerCamelCase : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__magic_name__ )
def UpperCamelCase__ ( self ):
if not self.model_tester.is_training:
return
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : str = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]:
continue
lowerCamelCase : str = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
lowerCamelCase : int = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
lowerCamelCase : Any = model(**__magic_name__ ).loss
loss.backward()
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowerCamelCase : List[str] = False
lowerCamelCase : Union[str, Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCamelCase : Tuple = model_class(__magic_name__ )
model.gradient_checkpointing_enable()
model.to(__magic_name__ )
model.train()
lowerCamelCase : int = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
lowerCamelCase : Any = model(**__magic_name__ ).loss
loss.backward()
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Union[str, Any] = _config_zero_init(__magic_name__ )
for model_class in self.all_model_classes:
lowerCamelCase : Optional[int] = model_class(config=__magic_name__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCamelCase__ ( self ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Tuple = BeitModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ):
lowerCamelCase : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(__magic_name__ )
lowerCamelCase : List[str] = self.default_image_processor
lowerCamelCase : Tuple = prepare_img()
lowerCamelCase : Dict = image_processor(images=__magic_name__ , return_tensors="""pt""" ).pixel_values.to(__magic_name__ )
# prepare bool_masked_pos
lowerCamelCase : str = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(__magic_name__ )
# forward pass
with torch.no_grad():
lowerCamelCase : Tuple = model(pixel_values=__magic_name__ , bool_masked_pos=__magic_name__ )
lowerCamelCase : List[Any] = outputs.logits
# verify the logits
lowerCamelCase : List[str] = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , __magic_name__ )
lowerCamelCase : str = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __magic_name__ , atol=1e-2 ) )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(__magic_name__ )
lowerCamelCase : Optional[Any] = self.default_image_processor
lowerCamelCase : List[str] = prepare_img()
lowerCamelCase : Optional[int] = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
lowerCamelCase : Optional[int] = model(**__magic_name__ )
lowerCamelCase : Dict = outputs.logits
# verify the logits
lowerCamelCase : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , __magic_name__ )
lowerCamelCase : Optional[int] = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
lowerCamelCase : str = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
__magic_name__ )
lowerCamelCase : List[str] = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[int] = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__magic_name__ )
lowerCamelCase : str = outputs.logits
# verify the logits
lowerCamelCase : str = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , __magic_name__ )
lowerCamelCase : Union[str, Any] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
lowerCamelCase : Union[str, Any] = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
lowerCamelCase : Any = model.to(__magic_name__ )
lowerCamelCase : Union[str, Any] = BeitImageProcessor(do_resize=__magic_name__ , size=6_4_0 , do_center_crop=__magic_name__ )
lowerCamelCase : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
lowerCamelCase : Any = Image.open(ds[0]["""file"""] )
lowerCamelCase : Any = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
lowerCamelCase : List[Any] = model(**__magic_name__ )
lowerCamelCase : Any = outputs.logits
# verify the logits
lowerCamelCase : Optional[Any] = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , __magic_name__ )
lowerCamelCase : Any = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
lowerCamelCase : Optional[Any] = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=__magic_name__ , )
else:
lowerCamelCase : Tuple = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=__magic_name__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __magic_name__ , atol=1e-4 ) )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
lowerCamelCase : Optional[int] = model.to(__magic_name__ )
lowerCamelCase : Optional[int] = BeitImageProcessor(do_resize=__magic_name__ , size=6_4_0 , do_center_crop=__magic_name__ )
lowerCamelCase : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
lowerCamelCase : Optional[Any] = Image.open(ds[0]["""file"""] )
lowerCamelCase : Optional[Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
lowerCamelCase : Union[str, Any] = model(**__magic_name__ )
lowerCamelCase : Tuple = outputs.logits.detach().cpu()
lowerCamelCase : str = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ , target_sizes=[(5_0_0, 3_0_0)] )
lowerCamelCase : int = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
lowerCamelCase : Tuple = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ )
lowerCamelCase : List[Any] = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
| 287 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_UpperCAmelCase : ClassVar[Features] = Features({"""audio""": Audio()})
_UpperCAmelCase : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
_UpperCAmelCase : str = "audio"
_UpperCAmelCase : str = "transcription"
def UpperCamelCase__ ( self , __magic_name__ ):
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] , __magic_name__ ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
lowerCamelCase : Optional[Any] = copy.deepcopy(self )
lowerCamelCase : List[Any] = self.input_schema.copy()
lowerCamelCase : Tuple = features[self.audio_column]
lowerCamelCase : int = input_schema
return task_template
@property
def UpperCamelCase__ ( self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
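# Usage note: the align method above checks that the configured audio column
# exists and is an Audio feature, then returns a deep copy of the template whose
# input schema carries the dataset's own Audio settings (e.g. sampling rate).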
| 287 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=True , __magic_name__=1 / 2_5_5 , __magic_name__=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCamelCase : Any = size if size is not None else {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3}
lowerCamelCase : Optional[Any] = parent
lowerCamelCase : Union[str, Any] = batch_size
lowerCamelCase : List[Any] = num_channels
lowerCamelCase : List[str] = min_resolution
lowerCamelCase : Optional[int] = max_resolution
lowerCamelCase : int = do_resize
lowerCamelCase : Dict = size
lowerCamelCase : List[str] = do_normalize
lowerCamelCase : int = image_mean
lowerCamelCase : List[str] = image_std
lowerCamelCase : Union[str, Any] = do_rescale
lowerCamelCase : int = rescale_factor
lowerCamelCase : Dict = do_pad
def UpperCamelCase__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False ):
if not batched:
lowerCamelCase : Any = image_inputs[0]
if isinstance(__magic_name__ , Image.Image ):
lowerCamelCase , lowerCamelCase : int = image.size
else:
lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase : List[Any] = int(self.size["""shortest_edge"""] * h / w )
lowerCamelCase : Tuple = self.size["""shortest_edge"""]
elif w > h:
lowerCamelCase : str = self.size["""shortest_edge"""]
lowerCamelCase : int = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCamelCase : Any = self.size["""shortest_edge"""]
lowerCamelCase : Optional[Any] = self.size["""shortest_edge"""]
else:
lowerCamelCase : Any = []
for image in image_inputs:
lowerCamelCase , lowerCamelCase : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase : List[Any] = max(__magic_name__ , key=lambda __magic_name__ : item[0] )[0]
lowerCamelCase : str = max(__magic_name__ , key=lambda __magic_name__ : item[1] )[1]
return expected_height, expected_width
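# Worked example of the branches above: an (h=30, w=40) image with
# shortest_edge=18 lands in the w > h branch, giving expected_height = 18 and
# expected_width = int(18 * 40 / 30) = 24.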
@require_torch
@require_vision
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : List[str] = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
lowerCamelCase : str = YolosImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __magic_name__ )
lowerCamelCase : Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__magic_name__ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2, """longest_edge""": 8_4} )
self.assertEqual(image_processor.do_pad , __magic_name__ )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : Any = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase , lowerCamelCase : str = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : List[str] = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase : List[str] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : Any = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase : int = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : str = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processings
lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
lowerCamelCase : str = self.image_processing_class(do_resize=__magic_name__ , do_normalize=__magic_name__ , do_rescale=__magic_name__ )
# create random PyTorch tensors
lowerCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowerCamelCase : Dict = image_processing_a.pad(__magic_name__ , return_tensors="""pt""" )
lowerCamelCase : List[Any] = image_processing_a(__magic_name__ , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase__ ( self ):
# prepare image and target
lowerCamelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCamelCase : List[Any] = json.loads(f.read() )
lowerCamelCase : Optional[int] = {"""image_id""": 3_9_7_6_9, """annotations""": target}
# encode them
lowerCamelCase : str = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
lowerCamelCase : Dict = image_processing(images=__magic_name__ , annotations=__magic_name__ , return_tensors="""pt""" )
# verify pixel values
lowerCamelCase : int = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["""pixel_values"""].shape , __magic_name__ )
lowerCamelCase : Tuple = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __magic_name__ , atol=1e-4 ) )
# verify area
lowerCamelCase : Union[str, Any] = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __magic_name__ ) )
# verify boxes
lowerCamelCase : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __magic_name__ )
lowerCamelCase : List[Any] = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __magic_name__ , atol=1e-3 ) )
# verify image_id
lowerCamelCase : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __magic_name__ ) )
# verify is_crowd
lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __magic_name__ ) )
# verify class_labels
lowerCamelCase : str = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __magic_name__ ) )
# verify orig_size
lowerCamelCase : Union[str, Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __magic_name__ ) )
# verify size
lowerCamelCase : Optional[int] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __magic_name__ ) )
@slow
def UpperCamelCase__ ( self ):
# prepare image, target and masks_path
lowerCamelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCamelCase : str = json.loads(f.read() )
lowerCamelCase : List[str] = {"""file_name""": """000000039769.png""", """image_id""": 3_9_7_6_9, """segments_info""": target}
lowerCamelCase : List[Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCamelCase : Tuple = YolosImageProcessor(format="""coco_panoptic""" )
lowerCamelCase : Tuple = image_processing(images=__magic_name__ , annotations=__magic_name__ , masks_path=__magic_name__ , return_tensors="""pt""" )
# verify pixel values
lowerCamelCase : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["""pixel_values"""].shape , __magic_name__ )
lowerCamelCase : str = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __magic_name__ , atol=1e-4 ) )
# verify area
lowerCamelCase : str = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __magic_name__ ) )
# verify boxes
lowerCamelCase : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __magic_name__ )
lowerCamelCase : Dict = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __magic_name__ , atol=1e-3 ) )
# verify image_id
lowerCamelCase : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __magic_name__ ) )
# verify is_crowd
lowerCamelCase : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __magic_name__ ) )
# verify class_labels
lowerCamelCase : List[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __magic_name__ ) )
# verify masks
lowerCamelCase : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __magic_name__ )
# verify orig_size
lowerCamelCase : List[Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __magic_name__ ) )
# verify size
lowerCamelCase : Dict = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __magic_name__ ) )
| 287 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower_vision_model"""
def __init__( self , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=3 , __magic_name__=1_6 , __magic_name__=2_8_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__=True , __magic_name__=False , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : str = num_hidden_layers
lowerCamelCase : Optional[int] = num_channels
lowerCamelCase : List[str] = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Any = initializer_factor
lowerCamelCase : Tuple = layer_norm_eps
lowerCamelCase : Tuple = stop_gradient
lowerCamelCase : Optional[int] = share_layernorm
lowerCamelCase : str = remove_last_layer
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowerCamelCase : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """bridgetower_text_model"""
def __init__( self , __magic_name__=5_0_2_6_5 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=1 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_4 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Any = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : Tuple = hidden_act
lowerCamelCase : Optional[int] = initializer_factor
lowerCamelCase : Any = intermediate_size
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Union[str, Any] = type_vocab_size
lowerCamelCase : Optional[int] = layer_norm_eps
lowerCamelCase : Optional[int] = position_embedding_type
lowerCamelCase : List[str] = use_cache
lowerCamelCase : List[str] = pad_token_id
lowerCamelCase : List[str] = bos_token_id
lowerCamelCase : Optional[int] = eos_token_id
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowerCamelCase : Optional[int] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower"""
def __init__( self , __magic_name__=True , __magic_name__="gelu" , __magic_name__=7_6_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__="add" , __magic_name__=1_2 , __magic_name__=6 , __magic_name__=False , __magic_name__=False , __magic_name__=None , __magic_name__=None , **__magic_name__ , ):
# TODO: remove this once the Hub files are updated.
lowerCamelCase : int = kwargs.pop("""text_config_dict""" , __magic_name__ )
lowerCamelCase : str = kwargs.pop("""vision_config_dict""" , __magic_name__ )
super().__init__(**__magic_name__ )
lowerCamelCase : str = share_cross_modal_transformer_layers
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : str = hidden_size
lowerCamelCase : Tuple = initializer_factor
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : int = share_link_tower_layers
lowerCamelCase : List[Any] = link_tower_type
lowerCamelCase : Tuple = num_attention_heads
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : Union[str, Any] = tie_word_embeddings
lowerCamelCase : Tuple = init_layernorm_from_vision_encoder
if text_config is None:
lowerCamelCase : Any = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
lowerCamelCase : int = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
lowerCamelCase : Any = BridgeTowerTextConfig(**__magic_name__ )
lowerCamelCase : Optional[Any] = BridgeTowerVisionConfig(**__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , **__magic_name__ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = copy.deepcopy(self.__dict__ )
lowerCamelCase : int = self.text_config.to_dict()
lowerCamelCase : Dict = self.vision_config.to_dict()
lowerCamelCase : List[str] = self.__class__.model_type
return output
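# Note on the round trip: the serialization above re-nests the sub-configs under
# "text_config" / "vision_config" keys, which is exactly what the sub-config
# from_pretrained classmethods earlier in this file unpack again.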
| 287 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""pixel_values"""]
def __init__( self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = PILImageResampling.BILINEAR , __magic_name__ = True , __magic_name__ = 1 / 2_5_5 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = size if size is not None else {"""shortest_edge""": 3_8_4}
lowerCamelCase : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Dict = do_resize
lowerCamelCase : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCamelCase : Union[str, Any] = resample
lowerCamelCase : str = do_rescale
lowerCamelCase : Union[str, Any] = rescale_factor
lowerCamelCase : Tuple = do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowerCamelCase : str = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase : List[str] = int(shortest_edge / crop_pct )
lowerCamelCase : Optional[Any] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Optional[int] = resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__magic_name__ , size=(shortest_edge, shortest_edge) , data_format=__magic_name__ , **__magic_name__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__magic_name__ , size=(shortest_edge, shortest_edge) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
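# Worked example of the first branch above (default crop_pct = 224/256 = 0.875):
# a request for shortest_edge=224 first resizes the short side to
# int(224 / 0.875) = 256, then center-crops to 224x224 -- the usual ImageNet
# "resize then crop" evaluation recipe.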
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Dict = size if size is not None else self.size
lowerCamelCase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : List[str] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : Optional[Any] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase : List[Any] = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase : Union[str, Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase : List[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase : Optional[int] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase : List[str] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 287 |
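# Sieve the primes up to the limit, then build Euler's totient with the
# product formula phi(n) = n * prod(1 - 1/p) over the primes p dividing n.
# The result is the sum of phi(n) for 2 <= n <= limit, i.e. the number of
# reduced proper fractions with denominator at most the limit.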
def _a ( lowerCamelCase = 100_0000 ):
lowerCamelCase : Any = set(range(3, lowerCamelCase, 2 ) )
primes.add(2 )
for p in range(3, lowerCamelCase, 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p, lowerCamelCase, lowerCamelCase ) ) )
lowerCamelCase : Any = [float(lowerCamelCase ) for n in range(limit + 1 )]
for p in primes:
for n in range(lowerCamelCase, limit + 1, lowerCamelCase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 287 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
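# Lazy import structure: the tokenizer/model submodules listed below are only
# imported on first attribute access (via _LazyModule), and each optional
# dependency is probed with an is_*_available() check before its names are
# registered.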
_lowerCamelCase ={
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 287 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
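# Two helpers: a word's "signature" is its letters in sorted order, and words
# sharing a signature are anagrams of one another, so the defaultdict below
# groups the whole word list by signature.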
def _a ( lowerCamelCase ):
return "".join(sorted(lowerCamelCase ) )
def _a ( lowerCamelCase ):
return word_by_signature[signature(lowerCamelCase )]
_lowerCamelCase =Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
_lowerCamelCase =sorted({word.strip().lower() for word in data.splitlines()})
_lowerCamelCase =collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_lowerCamelCase ={word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
| 287 | 1 |
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
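# Plain recursion: counts the ordered sequences of elements from the array
# that sum to the target (combination sum IV); exponential time, no caching.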
def count_of_possible_combinations(lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
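# Top-down variant: the same recursion memoized through dp_array, so each
# sub-target is solved at most once.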
def count_of_possible_combinations_with_dp_array(
lowerCamelCase, lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCamelCase : int = sum(
count_of_possible_combinations_with_dp_array(target - item, lowerCamelCase )
for item in array )
lowerCamelCase : Optional[int] = answer
return answer
lowerCamelCase : List[Any] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
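# Bottom-up variant: dp_array[i] accumulates the number of ways to reach each
# sub-target i from smaller ones, in O(target * len(array)) time.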
lowerCamelCase : str = [0] * (target + 1)
lowerCamelCase : Dict = 1
for i in range(1, target + 1 ):
for j in range(lowerCamelCase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase =3
_lowerCamelCase =5
_lowerCamelCase =[1, 2, 5]
print(combination_sum_iv(n, array, target))
| 287 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""pixel_values"""]
def __init__( self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = PILImageResampling.BILINEAR , __magic_name__ = True , __magic_name__ = 1 / 2_5_5 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = size if size is not None else {"""shortest_edge""": 3_8_4}
lowerCamelCase : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Dict = do_resize
lowerCamelCase : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCamelCase : Union[str, Any] = resample
lowerCamelCase : str = do_rescale
lowerCamelCase : Union[str, Any] = rescale_factor
lowerCamelCase : Tuple = do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowerCamelCase : str = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase : List[str] = int(shortest_edge / crop_pct )
lowerCamelCase : Optional[Any] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Optional[int] = resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__magic_name__ , size=(shortest_edge, shortest_edge) , data_format=__magic_name__ , **__magic_name__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__magic_name__ , size=(shortest_edge, shortest_edge) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Dict = size if size is not None else self.size
lowerCamelCase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : List[str] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : Optional[Any] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase : List[Any] = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase : Union[str, Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase : List[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase : Optional[int] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase : List[str] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 287 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _a ( ):
lowerCamelCase : Tuple = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
lowerCamelCase : List[Any] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ).convert("""RGB""" )
return image
def _a ( lowerCamelCase ):
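# Build the (old key -> new key) mapping from the original LAVIS checkpoint
# names to the Hugging Face names: vision tower embeddings, per-layer
# norms/attention/MLP weights, then the Q-Former embedding layernorm.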
lowerCamelCase : Union[str, Any] = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[str] = dct.pop(lowerCamelCase )
lowerCamelCase : Dict = val
def _a ( lowerCamelCase, lowerCamelCase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowerCamelCase : int = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
lowerCamelCase : Tuple = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
lowerCamelCase : Any = torch.cat((q_bias, torch.zeros_like(lowerCamelCase, requires_grad=lowerCamelCase ), v_bias) )
lowerCamelCase : Union[str, Any] = qkv_bias
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Dict = 364 if """coco""" in model_name else 224
lowerCamelCase : int = BlipaVisionConfig(image_size=lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
lowerCamelCase : List[str] = OPTConfig.from_pretrained("""facebook/opt-2.7b""", eos_token_id=lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
lowerCamelCase : Any = OPTConfig.from_pretrained("""facebook/opt-6.7b""", eos_token_id=lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
lowerCamelCase : str = TaConfig.from_pretrained("""google/flan-t5-xl""", dense_act_fn="""gelu""", bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowerCamelCase : List[str] = TaConfig.from_pretrained("""google/flan-t5-xxl""", dense_act_fn="""gelu""", bos_token_id=1 ).to_dict()
lowerCamelCase : Dict = BlipaConfig(vision_config=lowerCamelCase, text_config=lowerCamelCase )
return config, image_size
@torch.no_grad()
def _a ( lowerCamelCase, lowerCamelCase=None, lowerCamelCase=False ):
lowerCamelCase : Optional[int] = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
lowerCamelCase : Any = tokenizer("""\n""", add_special_tokens=lowerCamelCase ).input_ids[0]
lowerCamelCase , lowerCamelCase : List[Any] = get_blipa_config(lowerCamelCase, eos_token_id=lowerCamelCase )
lowerCamelCase : List[Any] = BlipaForConditionalGeneration(lowerCamelCase ).eval()
lowerCamelCase : List[str] = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
lowerCamelCase , lowerCamelCase : Dict = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
lowerCamelCase : Tuple = """cuda""" if torch.cuda.is_available() else """cpu"""
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = load_model_and_preprocess(
name=lowerCamelCase, model_type=lowerCamelCase, is_eval=lowerCamelCase, device=lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
lowerCamelCase : Dict = original_model.state_dict()
lowerCamelCase : Tuple = create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCamelCase : Dict = state_dict.pop(lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
lowerCamelCase : Tuple = key.replace("""Qformer.bert""", """qformer""" )
if "attention.self" in key:
lowerCamelCase : Optional[Any] = key.replace("""self""", """attention""" )
if "opt_proj" in key:
lowerCamelCase : Optional[Any] = key.replace("""opt_proj""", """language_projection""" )
if "t5_proj" in key:
lowerCamelCase : int = key.replace("""t5_proj""", """language_projection""" )
if key.startswith("""opt""" ):
lowerCamelCase : Any = key.replace("""opt""", """language""" )
if key.startswith("""t5""" ):
lowerCamelCase : List[Any] = key.replace("""t5""", """language""" )
lowerCamelCase : Dict = val
# read in qv biases
read_in_q_v_bias(lowerCamelCase, lowerCamelCase )
lowerCamelCase , lowerCamelCase : List[Any] = hf_model.load_state_dict(lowerCamelCase, strict=lowerCamelCase )
assert len(lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowerCamelCase : str = load_demo_image()
lowerCamelCase : Optional[int] = vis_processors["""eval"""](lowerCamelCase ).unsqueeze(0 ).to(lowerCamelCase )
lowerCamelCase : Tuple = tokenizer(["""\n"""], return_tensors="""pt""" ).input_ids.to(lowerCamelCase )
# create processor
lowerCamelCase : Optional[int] = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size}, image_mean=lowerCamelCase, image_std=lowerCamelCase )
lowerCamelCase : str = BlipaProcessor(image_processor=lowerCamelCase, tokenizer=lowerCamelCase )
lowerCamelCase : Dict = processor(images=lowerCamelCase, return_tensors="""pt""" ).pixel_values.to(lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(lowerCamelCase, lowerCamelCase )
original_model.to(lowerCamelCase )
hf_model.to(lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
lowerCamelCase : List[Any] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
lowerCamelCase : List[str] = hf_model(lowerCamelCase, lowerCamelCase ).logits
else:
lowerCamelCase : str = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
lowerCamelCase : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100 )
lowerCamelCase : int = hf_model(lowerCamelCase, lowerCamelCase, labels=lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""", original_logits[0, :3, :3] )
print("""First values of HF logits:""", logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowerCamelCase : Dict = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]], device=lowerCamelCase )
assert torch.allclose(logits[0, :3, :3], lowerCamelCase, atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowerCamelCase : Dict = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]], device=lowerCamelCase )
# mirror the slice check done for the non-coco variant above
assert torch.allclose(logits[0, :3, :3], lowerCamelCase, atol=1e-4 )
else:
# cast to same type
lowerCamelCase : List[str] = logits.dtype
assert torch.allclose(original_logits.to(lowerCamelCase ), lowerCamelCase, atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
lowerCamelCase : Optional[int] = """"""
lowerCamelCase : Any = tokenizer(lowerCamelCase, return_tensors="""pt""" ).input_ids.to(lowerCamelCase )
lowerCamelCase : Dict = original_model.generate({"""image""": original_pixel_values} )
lowerCamelCase : Union[str, Any] = hf_model.generate(
lowerCamelCase, lowerCamelCase, do_sample=lowerCamelCase, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
print("""Original generation:""", lowerCamelCase )
lowerCamelCase : Optional[Any] = input_ids.shape[1]
lowerCamelCase : Any = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=lowerCamelCase )
lowerCamelCase : str = [text.strip() for text in output_text]
print("""HF generation:""", lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowerCamelCase )
hf_model.save_pretrained(lowerCamelCase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
_lowerCamelCase =[
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
_lowerCamelCase =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 287 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCamelCase ={
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class A__ ( unittest.TestCase):
@classmethod
def UpperCamelCase__ ( cls ):
lowerCamelCase : int = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
lowerCamelCase : Any = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__magic_name__ , repo_id="""test-config""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
lowerCamelCase : Optional[int] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__magic_name__ , repo_id="""valid_org/test-config-org""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : List[str] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
CustomConfig.register_for_auto_class()
lowerCamelCase : Optional[Any] = CustomConfig(attribute=4_2 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
lowerCamelCase : List[str] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 4_2 )
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase : Optional[int] = c.n_embd + 1 # int
lowerCamelCase : Optional[int] = c.resid_pdrop + 1.0 # float
lowerCamelCase : Tuple = not c.scale_attn_weights # bool
lowerCamelCase : Any = c.summary_type + """foo""" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__magic_name__ , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(__magic_name__ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(__magic_name__ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(__magic_name__ , c.summary_type , """mismatch for key: summary_type""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = PretrainedConfig()
lowerCamelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__magic_name__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
lowerCamelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(__magic_name__ , __magic_name__ )]
if len(__magic_name__ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
F''' {", ".join(__magic_name__ )}.''' )
def UpperCamelCase__ ( self ):
with self.assertRaises(__magic_name__ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
lowerCamelCase : str = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
# A mock response for an HTTP head request to emulate server down
lowerCamelCase : Dict = mock.Mock()
lowerCamelCase : Optional[int] = 5_0_0
lowerCamelCase : List[Any] = {}
lowerCamelCase : Tuple = HTTPError
lowerCamelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__magic_name__ ) as mock_head:
lowerCamelCase : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ):
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase : List[str] = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained("""bert-base-cased""" )
lowerCamelCase : Optional[Any] = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__magic_name__ )
lowerCamelCase : str = 2
json.dump(configuration.to_dict() , open(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase : Any = ["""config.42.0.0.json"""]
lowerCamelCase : Optional[Any] = 7_6_8
configuration.save_pretrained(__magic_name__ )
shutil.move(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , os.path.join(__magic_name__ , """config.42.0.0.json""" ) )
lowerCamelCase : int = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCamelCase__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowerCamelCase : str = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
lowerCamelCase : Tuple = """v4.0.0"""
lowerCamelCase , lowerCamelCase : Optional[int] = new_transformers.models.auto.AutoConfig.from_pretrained(
__magic_name__ , return_unused_kwargs=__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__magic_name__ , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase : Tuple = """v3.0.0"""
lowerCamelCase : Any = old_transformers.models.auto.AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 287 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""image_processor""", """tokenizer"""]
_UpperCAmelCase : Optional[int] = """BlipImageProcessor"""
_UpperCAmelCase : List[str] = """AutoTokenizer"""
def __init__( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : Optional[Any] = False
super().__init__(__magic_name__ , __magic_name__ )
lowerCamelCase : Dict = self.image_processor
def __call__( self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = True , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = 0 , __magic_name__ = None , __magic_name__ = None , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = True , __magic_name__ = None , **__magic_name__ , ):
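# Mirror the BLIP processor behaviour: with no images, delegate straight to
# the tokenizer; otherwise compute pixel_values first and merge the optional
# text encoding into the image processor's BatchFeature-style output.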
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
lowerCamelCase : List[str] = self.tokenizer
lowerCamelCase : str = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_token_type_ids=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
return text_encoding
# add pixel_values
lowerCamelCase : List[Any] = self.image_processor(__magic_name__ , return_tensors=__magic_name__ )
if text is not None:
lowerCamelCase : Union[str, Any] = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_token_type_ids=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
else:
lowerCamelCase : str = None
if text_encoding is not None:
encoding_image_processor.update(__magic_name__ )
return encoding_image_processor
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.tokenizer.model_input_names
lowerCamelCase : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 287 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
lowerCamelCase : Any = tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # "J'aime le camembert !"
lowerCamelCase : str = model(__magic_name__ )["""last_hidden_state"""]
lowerCamelCase : Union[str, Any] = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice.
lowerCamelCase : Dict = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 287 | 1 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _a ( lowerCamelCase ):
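# Tokenize one example and compute its characters-per-token ratio, a rough
# measure of how well the tokenizer compresses this corpus.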
lowerCamelCase : Union[str, Any] = {}
lowerCamelCase : List[str] = tokenizer(example["""content"""], truncation=lowerCamelCase )["""input_ids"""]
lowerCamelCase : Dict = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
_lowerCamelCase =HfArgumentParser(PretokenizationArguments)
_lowerCamelCase =parser.parse_args()
if args.num_workers is None:
_lowerCamelCase =multiprocessing.cpu_count()
_lowerCamelCase =AutoTokenizer.from_pretrained(args.tokenizer_dir)
_lowerCamelCase =time.time()
_lowerCamelCase =load_dataset(args.dataset_name, split="""train""")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
_lowerCamelCase =time.time()
_lowerCamelCase =ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
_lowerCamelCase =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 287 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_lowerCamelCase =get_logger(__name__)
class A__ :
def __init__( self , __magic_name__ = None ):
lowerCamelCase : Dict = (
os.path.join(__magic_name__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowerCamelCase : List[str] = Extractor
def UpperCamelCase__ ( self , __magic_name__ ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
lowerCamelCase : int = os.path.abspath(__magic_name__ )
return os.path.join(self.extract_dir , hash_url_to_filename(__magic_name__ ) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
return force_extract or (
not os.path.isfile(__magic_name__ ) and not (os.path.isdir(__magic_name__ ) and os.listdir(__magic_name__ ))
)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = False ):
lowerCamelCase : Union[str, Any] = self.extractor.infer_extractor_format(__magic_name__ )
if not extractor_format:
return input_path
lowerCamelCase : int = self._get_output_path(__magic_name__ )
if self._do_extract(__magic_name__ , __magic_name__ ):
self.extractor.extract(__magic_name__ , __magic_name__ , __magic_name__ )
return output_path
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
@abstractmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
...
@staticmethod
@abstractmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
...
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[bytes] = []
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with open(__magic_name__ , """rb""" ) as f:
return f.read(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
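# Sniff by magic number: read at most as many leading bytes as the longest
# registered signature, then report a match if they start with any of this
# class's known magic numbers.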
if not magic_number:
lowerCamelCase : Optional[Any] = max(len(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
try:
lowerCamelCase : Tuple = cls.read_magic_number(__magic_name__ , __magic_name__ )
except OSError:
return False
return any(magic_number.startswith(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
return tarfile.is_tarfile(__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
def resolved(__magic_name__ ) -> str:
return os.path.realpath(os.path.abspath(__magic_name__ ) )
def badpath(__magic_name__ , __magic_name__ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__magic_name__ , __magic_name__ ) ).startswith(__magic_name__ )
def badlink(__magic_name__ , __magic_name__ ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCamelCase : List[str] = resolved(os.path.join(__magic_name__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__magic_name__ )
lowerCamelCase : Optional[Any] = resolved(__magic_name__ )
for finfo in members:
if badpath(finfo.name , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Dict = tarfile.open(__magic_name__ )
tar_file.extractall(__magic_name__ , members=TarExtractor.safemembers(__magic_name__ , __magic_name__ ) )
tar_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = [B"""\x1F\x8B"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with gzip.open(__magic_name__ , """rb""" ) as gzip_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if super().is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__magic_name__ , """rb""" ) as fp:
lowerCamelCase : List[str] = _EndRecData(__magic_name__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowerCamelCase : List[Any] = fp.read(__magic_name__ ) # CD is where we expect it to be
if len(__magic_name__ ) == sizeCentralDir:
lowerCamelCase : str = struct.unpack(__magic_name__ , __magic_name__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with zipfile.ZipFile(__magic_name__ , """r""" ) as zip_file:
zip_file.extractall(__magic_name__ )
zip_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[str] = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with lzma.open(__magic_name__ ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Union[str, Any] = rarfile.RarFile(__magic_name__ )
rf.extractall(__magic_name__ )
rf.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
lowerCamelCase : int = zstd.ZstdDecompressor()
with open(__magic_name__ , """rb""" ) as ifh, open(__magic_name__ , """wb""" ) as ofh:
dctx.copy_stream(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with bza.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with pyazr.SevenZipFile(__magic_name__ , """r""" ) as archive:
archive.extractall(__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ :
# Put the zip extractor last, because files can be wrongly detected as zip (presumably: as tar or gzip)
_UpperCAmelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase__ ( cls ):
return max(
len(__magic_name__ )
for extractor in cls.extractors.values()
if issubclass(__magic_name__ , __magic_name__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
try:
return MagicNumberBaseExtractor.read_magic_number(__magic_name__ , magic_number_length=__magic_name__ )
except OSError:
return b""
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = False ):
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = cls.infer_extractor_format(__magic_name__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ ): # <Added version="2.4.0"/>
lowerCamelCase : Dict = cls._get_magic_number_max_length()
lowerCamelCase : Optional[Any] = cls._read_magic_number(__magic_name__ , __magic_name__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return extractor_format
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = "deprecated" , ):
os.makedirs(os.path.dirname(__magic_name__ ) , exist_ok=__magic_name__ )
# Prevent parallel extractions
lowerCamelCase : Tuple = str(Path(__magic_name__ ).with_suffix(""".lock""" ) )
with FileLock(__magic_name__ ):
shutil.rmtree(__magic_name__ , ignore_errors=__magic_name__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__magic_name__ , __magic_name__ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = extractor if extractor != """deprecated""" else extractor_format
else:
lowerCamelCase : Optional[int] = cls.extractors[extractor_format]
return extractor.extract(__magic_name__ , __magic_name__ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=__magic_name__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__magic_name__ ):
return extractor.extract(__magic_name__ , __magic_name__ )
| 287 | 1 |
def _a ( lowerCamelCase ):
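# Reverse every word longer than four characters in the sentence and leave
# the shorter words untouched.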
return " ".join(
"""""".join(word[::-1] ) if len(lowerCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 287 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
_lowerCamelCase =5_0_0_0_0_0
_lowerCamelCase , _lowerCamelCase =os.path.split(__file__)
_lowerCamelCase =os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
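# Each benchmark below wraps one Dataset transform; the get_duration decorator
# (from the local utils module) is assumed to return the wall-clock time of
# the call, which is what ends up in the results dict written at the bottom.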
@get_duration
def _a ( lowerCamelCase, **lowerCamelCase ):
lowerCamelCase : Optional[Any] = dataset.map(**lowerCamelCase )
@get_duration
def _a ( lowerCamelCase, **lowerCamelCase ):
lowerCamelCase : Optional[Any] = dataset.filter(**lowerCamelCase )
def _a ( ):
lowerCamelCase : Optional[Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : Any = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
lowerCamelCase : Tuple = generate_example_dataset(
os.path.join(lowerCamelCase, """dataset.arrow""" ), lowerCamelCase, num_examples=lowerCamelCase )
lowerCamelCase : Tuple = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=lowerCamelCase )
def tokenize(lowerCamelCase ):
return tokenizer(examples["""text"""] )
lowerCamelCase : List[str] = map(lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, batched=lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""numpy""" ):
lowerCamelCase : Optional[int] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""pandas""" ):
lowerCamelCase : List[str] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""torch""", columns="""numbers""" ):
lowerCamelCase : List[str] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""tensorflow""", columns="""numbers""" ):
lowerCamelCase : Optional[int] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, function=lowerCamelCase, batched=lowerCamelCase )
lowerCamelCase : Union[str, Any] = filter(lowerCamelCase )
# Activate later when tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase, """wb""" ) as f:
f.write(json.dumps(lowerCamelCase ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 287 | 1 |
_lowerCamelCase =[sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
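# DIGITS_SQUARED caches the squared-digit sum of every 5-digit chunk, so
# next_number() can consume five digits of a number per table lookup instead
# of one digit at a time.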
def _a ( lowerCamelCase ):
lowerCamelCase : List[Any] = 0
while number:
# Slight speedup: process five digits at a time via the lookup table.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# Two chains exist: one ends in 89 (seeding its member 58 first minimizes the
# iterations needed to classify the remaining numbers), and the other ends in
# 1 and contains only the element 1. So 58 and 1 are seeded up front.
# A dictionary was replaced by a flat array to speed up the solution.
CHAINS =[None] * 1_0_0_0_0_0_0_0
CHAINS[0] =True  # the chain starting at 1 ends at 1
CHAINS[57] =False  # the chain starting at 58 ends at 89
def chain ( number ):
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution ( number = 1000_0000 ):
    for i in range(1, number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution() = }''')
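    # Sanity checks of the digit-square step from the problem statement:
    # 44 -> 4**2 + 4**2 = 32 (chain 44 -> 32 -> 13 -> 10 -> 1 ends at 1),
    # while 85 -> 8**2 + 5**2 = 89 ends at 89.
    assert next_number(44) == 32
    assert next_number(85) == 89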
| 287 |
def lucas_lehmer_test ( p ):
    if p < 2:
        raise ValueError("""p should not be less than 2!""" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
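    # The recurrence s_0 = 4, s_k = (s_{k-1}**2 - 2) mod (2**p - 1) reaches 0
    # after p - 2 steps exactly when the Mersenne number 2**p - 1 is prime:
    # 2**7 - 1 = 127 is prime, while 2**11 - 1 = 2047 = 23 * 89 is not.
    print([p for p in (3, 5, 7, 11, 13, 17, 19) if lucas_lehmer_test(p)])  # [3, 5, 7, 13, 17, 19]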
| 287 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
        image_processor_map = {
            """do_resize""": True,
            """size""": 2_0,
            """do_center_crop""": True,
            """crop_size""": 1_8,
            """do_normalize""": True,
            """image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            """image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map , fp )
def UpperCamelCase__ ( self , **__magic_name__ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def UpperCamelCase__ ( self , **__magic_name__ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ )
def UpperCamelCase__ ( self , **__magic_name__ ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__magic_name__ )
def UpperCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
        image_inputs = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , CLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , CLIPImageProcessor )
def UpperCamelCase__ ( self ):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , CLIPImageProcessor )
def UpperCamelCase__ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase__ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase__ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
processor()
def UpperCamelCase__ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def UpperCamelCase__ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
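# A minimal sketch of the processor outside the test harness (assumes network
# access to the public "openai/clip-vit-base-patch32" checkpoint; illustration
# only, not part of the suite):
#
#     from transformers import CLIPProcessor
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # -> dict with input_ids, attention_mask and pixel_values, as asserted above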
| 287 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_lowerCamelCase =logging.get_logger(__name__)
class OwlViTFeatureExtractor( OwlViTImageProcessor):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use OwlViTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 287 | 1 |
_lowerCamelCase ="""
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowerCamelCase =[{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowerCamelCase ={
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 287 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger =logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP ={
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig( PretrainedConfig):
    model_type = """segformer"""
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[3_2, 6_4, 1_6_0, 2_5_6] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=2_5_6 , semantic_loss_ignore_index=2_5_5 , **kwargs , ):
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("""reshape_last_stage""" , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig( OnnxConfig):
    torch_onnx_minimum_version = version.parse("""1.11""")
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1e-4
    @property
    def default_onnx_opset( self ):
        return 1_2
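# A short sketch of how this config is typically consumed (the model class is
# standard transformers usage; `num_labels=150` is an illustrative choice for
# ADE20k-style semantic segmentation, not taken from this file):
#
#     from transformers import SegformerConfig, SegformerForSemanticSegmentation
#     config = SegformerConfig(num_labels=150)
#     model = SegformerForSemanticSegmentation(config)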
| 287 | 1 |
import re
import string
import numpy as np
import datasets
_lowerCamelCase ="""
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_lowerCamelCase ="""
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_lowerCamelCase ="""
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , """""" , x ) for x in predictions] )
                references = np.array([re.sub(s , """""" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("""""" , """""" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("""""" , """""" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 1_0_0}
| 287 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger =logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP ={
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig( PretrainedConfig):
    model_type = """gpt_neo"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
    def __init__( self , vocab_size=5_0_2_5_7 , max_position_embeddings=2_0_4_8 , hidden_size=2_0_4_8 , num_layers=2_4 , attention_types=[[["global", "local"], 1_2]] , num_heads=1_6 , intermediate_size=None , window_size=2_5_6 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @staticmethod
    def expand_attention_types_params( attention_types ):
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
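# For example, the default attention_types [[["global", "local"], 12]] expands
# to the 24-entry list ["global", "local", "global", "local", ...], one
# attention flavour per hidden layer (matching num_layers=24 above).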
def custom_unfold ( input, dimension, size, step ):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step )
    min_length = torch.div(sizedim - size, step, rounding_mode="""floor""" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
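# Quick equivalence sketch (not executed at import time): the helper above is
# meant to mirror `torch.Tensor.unfold` using only tensor ops so it can be
# traced for ONNX export, e.g.
#
#     x = torch.arange(10.0).reshape(2, 5)
#     assert torch.equal(custom_unfold(x, 1, 3, 1), x.unfold(1, 3, 1))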
def custom_get_block_length_and_num_blocks ( seq_length, window_size ):
    import torch
    candidates = torch.arange(1, window_size )
    remainders = torch.remainder(seq_length, candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="""floor""" )
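# Worked example: seq_length=8, window_size=7 -> candidates are 1..6, the
# divisors of 8 among them are {1, 2, 4}, so block_length=4 and num_blocks=8//4=2.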
class GPTNeoOnnxConfig( OnnxConfigWithPast):
    @property
    def inputs( self ):
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_attention_heads( self ):
        return self._config.num_heads
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
                batch , seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ):
        return 1_3
| 287 | 1 |
from collections import deque
def tarjan ( g ):
    n = len(g )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]
    lowlink_of = index_of[:]
    def strong_connect(v , index , components ):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index
    components = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components
def create_graph ( n , edges ):
    g = [[] for _ in range(n )]
    for u, v in edges:
        g[u].append(v )
    return g
if __name__ == "__main__":
# Test
    n_vertices =7
    source =[0, 0, 1, 2, 3, 3, 4, 4, 6]
    target =[1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges =[(u, v) for u, v in zip(source, target)]
    g =create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
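    # Tarjan visits every vertex and edge exactly once, so the run above is
    # O(V + E); the four components come out in reverse topological order of
    # the condensation graph.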
| 287 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 1 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP ={
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES ={
"""facebook/esm2_t6_8M_UR50D""": 1_0_2_4,
"""facebook/esm2_t12_35M_UR50D""": 1_0_2_4,
}
def load_vocab_file ( vocab_file ):
    with open(vocab_file , """r""" ) as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]
class EsmTokenizer( PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , unk_token="<unk>" , cls_token="<cls>" , pad_token="<pad>" , mask_token="<mask>" , eos_token="<eos>" , **kwargs , ):
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
def UpperCamelCase__ ( self , __magic_name__ ):
return self._id_to_token.get(__magic_name__ , self.unk_token )
def UpperCamelCase__ ( self , __magic_name__ ):
return self._token_to_id.get(__magic_name__ , self._token_to_id.get(self.unk_token ) )
    def UpperCamelCase__ ( self , text , **kwargs ):
        return text.split()
    def get_vocab_size( self , with_added_tokens=False ):
        return len(self._id_to_token )
def UpperCamelCase__ ( self ):
return {token: i for i, token in enumerate(self.all_tokens )}
def UpperCamelCase__ ( self , __magic_name__ ):
return self._token_to_id.get(__magic_name__ , self._token_to_id.get(self.unk_token ) )
def UpperCamelCase__ ( self , __magic_name__ ):
return self._id_to_token.get(__magic_name__ , self.unk_token )
    def UpperCamelCase__ ( self , token_ids_0 , token_ids_1 = None ):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" )
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def UpperCamelCase__ ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1 ) + [1]
        return mask
    def UpperCamelCase__ ( self , save_directory , filename_prefix ):
        vocab_file = os.path.join(save_directory , (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" )
        with open(vocab_file , """w""" ) as f:
            f.write("""\n""".join(self.all_tokens ) )
        return (vocab_file,)
@property
def UpperCamelCase__ ( self ):
        return self.get_vocab_size(with_added_tokens=False )
    def UpperCamelCase__ ( self , new_tokens , special_tokens = False ):
        return super()._add_tokens(new_tokens , special_tokens=special_tokens )
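# Usage sketch: `_tokenize` above splits on whitespace, so a local vocab.txt
# with one token per line is enough to encode a residue sequence (illustration
# only; the checkpoint URLs above host the real vocab files):
#
#     tokenizer = EsmTokenizer("vocab.txt")
#     enc = tokenizer("M K T V")  # -> <cls> M K T V <eos> as ids,
#     # per build_inputs_with_special_tokens above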
| 287 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters ( model_a, model_b, did_step, iteration ):
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model ( model, input, target, accelerator, do_backward=True ):
    model.train()
    output = model(input )
    loss = F.mse_loss(output, target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup ( accelerator, sched=False ):
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset, batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3 )
        sched = LambdaLR(opt, lr_lambda=lambda epoch : epoch**0.6_5 )
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch : epoch**0.6_5 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model, dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync ( accelerator ):
    # Test when on a single CPU or GPU that the context manager does nothing
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model, ddp_input, ddp_target, accelerator )
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration )
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_distributed_sync ( accelerator ):
    # Test on distributed setup that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model, ddp_input, ddp_target, accelerator )
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad ) is False
                ), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad ) is True
                ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_gradient_accumulation ( split_batches=False, dispatch_batches=False ):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model, ddp_input, ddp_target, accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad ) is True
                ), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad ) is False
                ), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler ( split_batches=False, dispatch_batches=False ):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , opt , sched , dataloader , ddp_model , ddp_opt , ddp_sched = get_training_setup(accelerator, True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model, ddp_input, ddp_target, accelerator )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader ))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
    GradientState._reset_state()
def test_dataloader_break ( ):
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80 )
    first_dataloader = DataLoader(first_dset, batch_size=16 )
    second_dset = RegressionDataset(length=96 )
    second_dataloader = DataLoader(second_dset, batch_size=16 )
    first_dataloader , second_dataloader = accelerator.prepare(first_dataloader, second_dataloader )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader )
        if iteration < len(first_dataloader ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader )
                    if batch_num < len(second_dataloader ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main ( ):
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
        test_noop_sync(accelerator )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
        test_distributed_sync(accelerator )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
                test_gradient_accumulation(split_batch, dispatch_batches )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""", """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, """`split_batches=False`, `dispatch_batches=False`**""", )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches )
def _mp_fn ( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
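# The idiom these tests exercise, in miniature: inside `no_sync` DDP skips its
# gradient all-reduce so grads accumulate locally, and the first backward pass
# outside the context synchronizes them across processes (sketch only):
#
#     with accelerator.no_sync(ddp_model):
#         accelerator.backward(loss_a)  # local grads only
#     accelerator.backward(loss_b)      # grads all-reduced here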
| 287 | 1 |
from __future__ import annotations
class Matrix:
    def __init__( self , rows ):
        error = TypeError(
            """Matrices must be formed from a list of zero or more lists containing at """
            """least one and the same number of values, each of which must be of type """
            """int or float.""" )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self ):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
    @property
    def num_rows( self ):
        return len(self.rows )
    @property
    def num_columns( self ):
        return len(self.rows[0] )
    @property
    def order( self ):
        return (self.num_rows, self.num_columns)
    @property
    def is_square( self ):
        return self.order[0] == self.order[1]
    def identity( self ):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant( self ):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0] )
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns ) )
    def is_invertable( self ):
        return bool(self.determinant() )
    def get_minor( self , row , column ):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()
    def get_cofactor( self , row , column ):
        if (row + column) % 2 == 0:
            return self.get_minor(row , column )
        return -1 * self.get_minor(row , column )
    def minors( self ):
        return Matrix(
            [
                [self.get_minor(row , column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def cofactors( self ):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns )
                ]
                for row in range(self.minors().num_rows )
            ] )
    def adjugate( self ):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )
    def inverse( self ):
        determinant = self.determinant()
        if not determinant:
            raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
        return self.adjugate() * (1 / determinant)
    def __repr__( self ):
        return str(self.rows )
    def __str__( self ):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    """[""" + """. """.join([str(value ) for value in row] ) + """.]"""
                    for row in self.rows
                ] )
            + "]"
        )
    def add_row( self , row , position = None ):
        type_error = TypeError("""Row must be a list containing all ints and/or floats""" )
        if not isinstance(row , list ):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                """Row must be equal in length to the other rows in the matrix""" )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self , column , position = None ):
        type_error = TypeError(
            """Column must be a list containing all ints and/or floats""" )
        if not isinstance(column , list ):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                """Column must be equal in length to the other columns in the matrix""" )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self , other ):
        if not isinstance(other , Matrix ):
            return NotImplemented
        return self.rows == other.rows
    def __ne__( self , other ):
        return not self == other
    def __neg__( self ):
        return self * -1
    def __add__( self , other ):
        if self.order != other.order:
            raise ValueError("""Addition requires matrices of the same order""" )
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )
    def __sub__( self , other ):
        if self.order != other.order:
            raise ValueError("""Subtraction requires matrices of the same order""" )
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )
    def __mul__( self , other ):
        if isinstance(other , (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other , Matrix ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    """The number of columns in the first matrix must """
                    """be equal to the number of rows in the second""" )
            return Matrix(
                [
                    [Matrix.dot_product(row , column ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                """A Matrix can only be multiplied by an int, float, or another matrix""" )
    def __pow__( self , other ):
        if not isinstance(other , int ):
            raise TypeError("""A Matrix can only be raised to the power of an int""" )
        if not self.is_square:
            raise ValueError("""Only square matrices can be raised to a power""" )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                """Only invertable matrices can be raised to a negative power""" )
        result = self
        for _ in range(other - 1 ):
            result *= self
        return result
    @classmethod
    def dot_product( cls , row , column ):
        return sum(row[i] * column[i] for i in range(len(row ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
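    # A small worked example of the class above: the determinant and matrix
    # power follow directly from the definitions.
    m = Matrix([[2, 0], [0, 2]])
    assert m.determinant() == 4
    assert (m ** 2).rows == [[4, 0], [0, 4]]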
| 287 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 287 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , ROBERTA_START_DOCSTRING , )
class DeeRobertaModel( DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = """roberta"""
    def __init__( self , config ):
        super().__init__(config )
        self.embeddings = RobertaEmbeddings(config )
        self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , __SCREAMING_SNAKE_CASE , )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = RobertaConfig
_UpperCAmelCase : List[str] = """roberta"""
def __init__( self , __magic_name__ ):
super().__init__(__magic_name__ )
lowerCamelCase : Optional[int] = config.num_labels
lowerCamelCase : List[str] = config.num_hidden_layers
lowerCamelCase : Union[str, Any] = DeeRobertaModel(__magic_name__ )
lowerCamelCase : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
lowerCamelCase : int = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=-1 , __magic_name__=False , ):
lowerCamelCase : Union[str, Any] = self.num_layers
try:
lowerCamelCase : Tuple = self.roberta(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , position_ids=__magic_name__ , head_mask=__magic_name__ , inputs_embeds=__magic_name__ , )
lowerCamelCase : str = outputs[1]
lowerCamelCase : str = self.dropout(__magic_name__ )
lowerCamelCase : List[Any] = self.classifier(__magic_name__ )
lowerCamelCase : Tuple = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowerCamelCase : Optional[int] = e.message
lowerCamelCase : Union[str, Any] = e.exit_layer
lowerCamelCase : Any = outputs[0]
if not self.training:
lowerCamelCase : Dict = entropy(__magic_name__ )
lowerCamelCase : Union[str, Any] = []
lowerCamelCase : List[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowerCamelCase : str = MSELoss()
lowerCamelCase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCamelCase : Any = CrossEntropyLoss()
lowerCamelCase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
lowerCamelCase : Any = []
for highway_exit in outputs[-1]:
lowerCamelCase : Union[str, Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(__magic_name__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowerCamelCase : int = MSELoss()
lowerCamelCase : Union[str, Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCamelCase : Union[str, Any] = CrossEntropyLoss()
lowerCamelCase : Any = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__magic_name__ )
if train_highway:
lowerCamelCase : str = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway exit from the summed auxiliary loss
else:
lowerCamelCase : Dict = (loss,) + outputs
if not self.training:
lowerCamelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowerCamelCase : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
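# A hedged illustration of the exit signal used above (a sketch, not part of the
# original module; assumes the imported `entropy` helper computes
# -sum(p * log(p)) over the softmax of the logits):
import torch
example_logits = torch.tensor([[4.0, 0.1, 0.1]])
example_probs = torch.softmax(example_logits, dim=-1)
example_entropy = -(example_probs * example_probs.log()).sum() # ~0.19 nats: peaked softmax, confident, exit early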
| 287 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """conditional_detr"""
_UpperCAmelCase : Optional[int] = ["""past_key_values"""]
_UpperCAmelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __magic_name__=True , __magic_name__=None , __magic_name__=3 , __magic_name__=3_0_0 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=True , __magic_name__="relu" , __magic_name__=2_5_6 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=False , __magic_name__="sine" , __magic_name__="resnet50" , __magic_name__=True , __magic_name__=False , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=1 , __magic_name__=1 , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=0.25 , **__magic_name__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCamelCase : Dict = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase : str = config_class.from_dict(__magic_name__ )
lowerCamelCase : Dict = use_timm_backbone
lowerCamelCase : str = backbone_config
lowerCamelCase : Tuple = num_channels
lowerCamelCase : Dict = num_queries
lowerCamelCase : Any = d_model
lowerCamelCase : Optional[Any] = encoder_ffn_dim
lowerCamelCase : List[str] = encoder_layers
lowerCamelCase : Union[str, Any] = encoder_attention_heads
lowerCamelCase : Any = decoder_ffn_dim
lowerCamelCase : Dict = decoder_layers
lowerCamelCase : Union[str, Any] = decoder_attention_heads
lowerCamelCase : Dict = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Union[str, Any] = activation_dropout
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : int = init_std
lowerCamelCase : str = init_xavier_std
lowerCamelCase : Tuple = encoder_layerdrop
lowerCamelCase : str = decoder_layerdrop
lowerCamelCase : Tuple = encoder_layers
lowerCamelCase : Optional[int] = auxiliary_loss
lowerCamelCase : Optional[Any] = position_embedding_type
lowerCamelCase : Optional[int] = backbone
lowerCamelCase : Union[str, Any] = use_pretrained_backbone
lowerCamelCase : str = dilation
# Hungarian matcher
lowerCamelCase : Optional[Any] = class_cost
lowerCamelCase : Dict = bbox_cost
lowerCamelCase : Tuple = giou_cost
# Loss coefficients
lowerCamelCase : Union[str, Any] = mask_loss_coefficient
lowerCamelCase : Dict = dice_loss_coefficient
lowerCamelCase : Optional[int] = cls_loss_coefficient
lowerCamelCase : Optional[int] = bbox_loss_coefficient
lowerCamelCase : Optional[int] = giou_loss_coefficient
lowerCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
return self.d_model
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCamelCase : Optional[int] = self.backbone_config.to_dict()
lowerCamelCase : Optional[Any] = self.__class__.model_type
return output
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = version.parse("""1.11""")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-5
@property
def UpperCamelCase__ ( self ):
return 1_2
| 287 | 1 |
def _a ( lowerCamelCase = 100_0000 ):
lowerCamelCase : Any = set(range(3, lowerCamelCase, 2 ) )
primes.add(2 )
for p in range(3, lowerCamelCase, 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p, lowerCamelCase, lowerCamelCase ) ) )
lowerCamelCase : Any = [float(lowerCamelCase ) for n in range(limit + 1 )]
for p in primes:
for n in range(lowerCamelCase, limit + 1, lowerCamelCase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
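# A hand-checked example of what this computes (the Project Euler 72-style count
# of reduced proper fractions n/d with n < d <= limit, which equals sum(phi(d))
# for d in 2..limit): for limit = 8, phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so the
# result is 21.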
if __name__ == "__main__":
print(f'''{solution() = }''')
| 287 |
import json
import sys
def _a ( lowerCamelCase, lowerCamelCase ):
with open(lowerCamelCase, encoding="""utf-8""" ) as f:
lowerCamelCase : List[Any] = json.load(lowerCamelCase )
lowerCamelCase : Optional[Any] = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(lowerCamelCase ):
lowerCamelCase : List[Any] = results[benchmark_name]
lowerCamelCase : Union[str, Any] = benchmark_name.split("""/""" )[-1]
output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
lowerCamelCase : Any = """| metric |"""
lowerCamelCase : str = """|--------|"""
lowerCamelCase : List[Any] = """| new / old (diff) |"""
for metric_name in sorted(lowerCamelCase ):
lowerCamelCase : List[Any] = benchmark_res[metric_name]
lowerCamelCase : Tuple = metric_vals["""new"""]
lowerCamelCase : int = metric_vals.get("""old""", lowerCamelCase )
lowerCamelCase : Dict = metric_vals.get("""diff""", lowerCamelCase )
lowerCamelCase : Dict = F''' {new_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else """None"""
if old_val is not None:
val_str += F''' / {old_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else "None"
if dif_val is not None:
val_str += F''' ({dif_val:f})''' if isinstance(lowerCamelCase, (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(lowerCamelCase, """w""", encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(lowerCamelCase ) )
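# A hedged sketch of the expected input (hypothetical file contents): a JSON
# object mapping benchmark names to {metric: {"new": ..., "old": ..., "diff": ...}};
# each benchmark becomes a "### Benchmark: <name>" section holding a one-row
# metric table, with the whole report wrapped in a collapsible <details> block.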
if __name__ == "__main__":
_lowerCamelCase =sys.argv[1]
_lowerCamelCase =sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 287 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """biogpt"""
def __init__( self , __magic_name__=4_2_3_8_4 , __magic_name__=1_0_2_4 , __magic_name__=2_4 , __magic_name__=1_6 , __magic_name__=4_0_9_6 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1_0_2_4 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=True , __magic_name__=True , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , **__magic_name__ , ):
lowerCamelCase : Optional[Any] = vocab_size
lowerCamelCase : int = max_position_embeddings
lowerCamelCase : Union[str, Any] = hidden_size
lowerCamelCase : Dict = num_hidden_layers
lowerCamelCase : Dict = num_attention_heads
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : List[Any] = attention_probs_dropout_prob
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Any = layer_norm_eps
lowerCamelCase : Optional[int] = scale_embedding
lowerCamelCase : Optional[int] = use_cache
lowerCamelCase : List[str] = layerdrop
lowerCamelCase : int = activation_dropout
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
| 287 |
def _a ( lowerCamelCase ):
return " ".join(
"""""".join(word[::-1] ) if len(lowerCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
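# Hand-checked behaviour: words strictly longer than four characters are
# reversed, shorter words are left untouched, so the call above prints
# "Hey fellow warriors".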
| 287 | 1 |
from __future__ import annotations
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ): # noqa: E741
while r - l > 1:
lowerCamelCase : Any = (l + r) // 2
if v[m] >= key:
lowerCamelCase : List[str] = m
else:
lowerCamelCase : Dict = m # noqa: E741
return r
def _a ( lowerCamelCase ):
if len(lowerCamelCase ) == 0:
return 0
lowerCamelCase : List[Any] = [0] * len(lowerCamelCase )
lowerCamelCase : Optional[int] = 1
lowerCamelCase : str = v[0]
for i in range(1, len(lowerCamelCase ) ):
if v[i] < tail[0]:
lowerCamelCase : int = v[i]
elif v[i] > tail[length - 1]:
lowerCamelCase : Optional[Any] = v[i]
length += 1
else:
lowerCamelCase : Optional[int] = v[i]
return length
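# A hand-checked example of the patience-style algorithm above: for
# v = [2, 5, 3, 7, 11, 8, 10, 13, 6] the longest strictly increasing
# subsequence is 2, 3, 7, 8, 10, 13, so the intended result is 6. tail[i]
# holds the smallest possible tail value of an increasing subsequence of
# length i + 1, and the first helper binary-searches for the leftmost
# tail >= key.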
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def _a ( lowerCamelCase ):
if version.parse(lowerCamelCase ) < version.parse(lowerCamelCase ):
if "dev" in min_version:
lowerCamelCase : Optional[int] = (
"""This example requires a source install from HuggingFace Transformers (see """
"""`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
)
else:
lowerCamelCase : int = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
"""versions of HuggingFace Transformers.""" )
| 287 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = """bert"""
def __init__( self , __magic_name__=3_0_5_2_2 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=0 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ):
super().__init__(pad_token_id=__magic_name__ , **__magic_name__ )
lowerCamelCase : List[str] = vocab_size
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Optional[int] = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : str = hidden_act
lowerCamelCase : int = intermediate_size
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Any = type_vocab_size
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Dict = layer_norm_eps
lowerCamelCase : Tuple = position_embedding_type
lowerCamelCase : Optional[Any] = use_cache
lowerCamelCase : int = classifier_dropout
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
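# A hedged reading of the export spec above: multiple-choice inputs are given
# three dynamic ONNX axes (batch, choice, sequence); every other task uses two,
# e.g. {"input_ids": {0: "batch", 1: "sequence"}, "attention_mask": {0: "batch", 1: "sequence"}, ...}.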
| 287 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = """camembert"""
def __init__( self , __magic_name__=3_0_5_2_2 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ):
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : int = num_attention_heads
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : int = layer_norm_eps
lowerCamelCase : Any = position_embedding_type
lowerCamelCase : Optional[int] = use_cache
lowerCamelCase : Union[str, Any] = classifier_dropout
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 287 | 1 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : int = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
lowerCamelCase : List[str] = """fp16"""
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
lowerCamelCase : str = """fp16"""
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def UpperCamelCase__ ( self ):
# pass variant but use the non-variant filenames
lowerCamelCase : Tuple = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
lowerCamelCase : Union[str, Any] = """fp16"""
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowerCamelCase : Optional[int] = """fp16"""
self.assertFalse(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
lowerCamelCase : Optional[Any] = """fp16"""
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def UpperCamelCase__ ( self ):
# pass variant but use the non-variant filenames
lowerCamelCase : Optional[Any] = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
lowerCamelCase : Optional[int] = """fp16"""
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
lowerCamelCase : Optional[Any] = """fp16"""
self.assertFalse(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
| 287 |
_lowerCamelCase ={
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0_2_1_7_6_6_3_4E-1_9,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355818,
}
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
lowerCamelCase : Dict = (
F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
F'''Valid values are: {", ".join(lowerCamelCase )}'''
)
raise ValueError(lowerCamelCase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
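# A hand-checked conversion using the table above: 2 kilowatthour to joule is
# 2 * 3_600_000 / 1.0 == 7_200_000.0 joules.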
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287 | 1 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
lowerCamelCase : Optional[int] = nn.Parameter(lowerCamelCase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
lowerCamelCase : Union[str, Any] = nn.Parameter(lowerCamelCase )
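# Reading aid (not in the original script): set_param is the single choke point
# for every weight copy below; each tensor is shape-checked against the target
# torch layer before being wrapped in nn.Parameter, so a mismatched checkpoint
# fails fast with the offending layer named in the assertion message.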
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# set torch weights for 1-to-1 comparison
lowerCamelCase : Optional[int] = np.asarray(weights[0] )
lowerCamelCase : List[Any] = np.asarray(weights[1] )
lowerCamelCase : List[str] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key, torch.tensor(lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, lowerCamelCase ), )
set_param(
torch_layer.self_attention.value, torch.tensor(lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, lowerCamelCase ), )
set_param(
torch_layer.output.dense, torch.tensor(lowerCamelCase ).view(-1, lowerCamelCase ).contiguous().transpose(0, 1 ), )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# set torch weights for 1-to-1 comparison
lowerCamelCase : Optional[int] = np.asarray(weights[0] )
lowerCamelCase : Any = np.asarray(weights[1] )
lowerCamelCase : Optional[Any] = np.asarray(weights[2] )
lowerCamelCase : str = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query, torch.tensor(lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, lowerCamelCase ), )
set_param(
torch_layer.self_attention.key, torch.tensor(lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, lowerCamelCase ), )
set_param(
torch_layer.self_attention.value, torch.tensor(lowerCamelCase ).transpose(1, 2 ).contiguous().view(-1, lowerCamelCase ), )
set_param(
torch_layer.output.dense, torch.tensor(lowerCamelCase ).view(-1, lowerCamelCase ).contiguous().transpose(0, 1 ), )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# layernorm 1
lowerCamelCase : List[Any] = weights[0][0][0]
lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] )
lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm, torch.tensor(lowerCamelCase ), torch.tensor(lowerCamelCase ), )
# lsh weights + output
lowerCamelCase : int = weights[0][1]
if len(lowerCamelCase ) < 4:
set_layer_weights_in_torch_lsh(lowerCamelCase, torch_block.attention, lowerCamelCase )
else:
set_layer_weights_in_torch_local(lowerCamelCase, torch_block.attention, lowerCamelCase )
# intermediate weighs
lowerCamelCase : Optional[Any] = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCamelCase ) == 4:
lowerCamelCase : List[str] = intermediate_weights[2]
# layernorm 2
lowerCamelCase : Dict = np.asarray(intermediate_weights[0][0] )
lowerCamelCase : int = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm, torch.tensor(lowerCamelCase ), torch.tensor(lowerCamelCase ), )
# intermediate dense
lowerCamelCase : Any = np.asarray(intermediate_weights[1][0] )
lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense, torch.tensor(lowerCamelCase ).transpose(0, 1 ).contiguous(), torch.tensor(lowerCamelCase ), )
# intermediate out
lowerCamelCase : Dict = np.asarray(intermediate_weights[4][0] )
lowerCamelCase : Any = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense, torch.tensor(lowerCamelCase ).transpose(0, 1 ).contiguous(), torch.tensor(lowerCamelCase ), )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# reformer model
lowerCamelCase : str = torch_model.reformer
# word embeds
lowerCamelCase : Union[str, Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings, torch.tensor(lowerCamelCase ), )
if isinstance(weights[3], lowerCamelCase ):
lowerCamelCase : List[str] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCamelCase : Dict = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'''{position_embeddings[emb_idx]} emb does not match'''
lowerCamelCase : str = nn.Parameter(torch.tensor(lowerCamelCase ) )
lowerCamelCase : str = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCamelCase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# output layer norm
lowerCamelCase : Optional[int] = np.asarray(weights[7][0] )
lowerCamelCase : Optional[int] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm, torch.tensor(lowerCamelCase ), torch.tensor(lowerCamelCase ), )
# output embeddings
lowerCamelCase : Optional[int] = np.asarray(weights[9][0] )
lowerCamelCase : Optional[Any] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder, torch.tensor(lowerCamelCase ).transpose(0, 1 ).contiguous(), torch.tensor(lowerCamelCase ), )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# Initialise PyTorch model
lowerCamelCase : Any = ReformerConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase : List[str] = ReformerModelWithLMHead(lowerCamelCase )
with open(lowerCamelCase, """rb""" ) as f:
lowerCamelCase : Dict = pickle.load(lowerCamelCase )["""weights"""]
set_model_weights_in_torch(lowerCamelCase, lowerCamelCase, config.hidden_size )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCamelCase =parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 287 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_UpperCAmelCase : ClassVar[Features] = Features({"""audio""": Audio()})
_UpperCAmelCase : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
_UpperCAmelCase : str = "audio"
_UpperCAmelCase : str = "transcription"
def UpperCamelCase__ ( self , __magic_name__ ):
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] , __magic_name__ ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
lowerCamelCase : Optional[Any] = copy.deepcopy(self )
lowerCamelCase : List[Any] = self.input_schema.copy()
lowerCamelCase : Tuple = features[self.audio_column]
lowerCamelCase : int = input_schema
return task_template
@property
def UpperCamelCase__ ( self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
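# A hedged usage sketch (hypothetical feature set, not from this file): given
# Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")}),
# the alignment method above deep-copies the template and swaps the dataset's
# own Audio feature (sampling rate included) into the template's input schema.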
| 287 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = tempfile.mkdtemp()
lowerCamelCase : Dict = BlipImageProcessor()
lowerCamelCase : int = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
lowerCamelCase : List[Any] = BlipaProcessor(__magic_name__ , __magic_name__ )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self , **__magic_name__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def UpperCamelCase__ ( self , **__magic_name__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def UpperCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase : Optional[Any] = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
lowerCamelCase : Union[str, Any] = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : List[Any] = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCamelCase : int = self.prepare_image_inputs()
lowerCamelCase : Tuple = image_processor(__magic_name__ , return_tensors="""np""" )
lowerCamelCase : Union[str, Any] = processor(images=__magic_name__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.get_image_processor()
lowerCamelCase : int = self.get_tokenizer()
lowerCamelCase : Union[str, Any] = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCamelCase : int = """lower newer"""
lowerCamelCase : str = processor(text=__magic_name__ )
lowerCamelCase : Optional[Any] = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = self.get_image_processor()
lowerCamelCase : int = self.get_tokenizer()
lowerCamelCase : Any = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCamelCase : Optional[int] = """lower newer"""
lowerCamelCase : int = self.prepare_image_inputs()
lowerCamelCase : List[Any] = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : Tuple = self.get_tokenizer()
lowerCamelCase : List[Any] = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCamelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase : Any = processor.batch_decode(__magic_name__ )
lowerCamelCase : Optional[Any] = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.get_image_processor()
lowerCamelCase : Any = self.get_tokenizer()
lowerCamelCase : Dict = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCamelCase : Tuple = """lower newer"""
lowerCamelCase : Optional[Any] = self.prepare_image_inputs()
lowerCamelCase : List[str] = processor(text=__magic_name__ , images=__magic_name__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 287 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower_vision_model"""
def __init__( self , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=3 , __magic_name__=1_6 , __magic_name__=2_8_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__=True , __magic_name__=False , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : str = num_hidden_layers
lowerCamelCase : Optional[int] = num_channels
lowerCamelCase : List[str] = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Any = initializer_factor
lowerCamelCase : Tuple = layer_norm_eps
lowerCamelCase : Tuple = stop_gradient
lowerCamelCase : Optional[int] = share_layernorm
lowerCamelCase : str = remove_last_layer
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowerCamelCase : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """bridgetower_text_model"""
def __init__( self , __magic_name__=5_0_2_6_5 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=1 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_4 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Any = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : Tuple = hidden_act
lowerCamelCase : Optional[int] = initializer_factor
lowerCamelCase : Any = intermediate_size
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Union[str, Any] = type_vocab_size
lowerCamelCase : Optional[int] = layer_norm_eps
lowerCamelCase : Optional[int] = position_embedding_type
lowerCamelCase : List[str] = use_cache
lowerCamelCase : List[str] = pad_token_id
lowerCamelCase : List[str] = bos_token_id
lowerCamelCase : Optional[int] = eos_token_id
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowerCamelCase : Optional[int] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower"""
def __init__( self , __magic_name__=True , __magic_name__="gelu" , __magic_name__=7_6_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__="add" , __magic_name__=1_2 , __magic_name__=6 , __magic_name__=False , __magic_name__=False , __magic_name__=None , __magic_name__=None , **__magic_name__ , ):
# TODO: remove this once the Hub files are updated.
lowerCamelCase : int = kwargs.pop("""text_config_dict""" , __magic_name__ )
lowerCamelCase : str = kwargs.pop("""vision_config_dict""" , __magic_name__ )
super().__init__(**__magic_name__ )
lowerCamelCase : str = share_cross_modal_transformer_layers
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : str = hidden_size
lowerCamelCase : Tuple = initializer_factor
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : int = share_link_tower_layers
lowerCamelCase : List[Any] = link_tower_type
lowerCamelCase : Tuple = num_attention_heads
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : Union[str, Any] = tie_word_embeddings
lowerCamelCase : Tuple = init_layernorm_from_vision_encoder
if text_config is None:
lowerCamelCase : Any = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
lowerCamelCase : int = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
lowerCamelCase : Any = BridgeTowerTextConfig(**__magic_name__ )
lowerCamelCase : Optional[Any] = BridgeTowerVisionConfig(**__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , **__magic_name__ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = copy.deepcopy(self.__dict__ )
lowerCamelCase : int = self.text_config.to_dict()
lowerCamelCase : Dict = self.vision_config.to_dict()
lowerCamelCase : List[str] = self.__class__.model_type
return output
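# A hedged note on the pattern above (shared by composite configs): to_dict
# serialises the nested text and vision configs recursively and re-sets the
# outer model_type, so saving and reloading round-trips the full three-part
# configuration.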
| 287 | 1 |
import math
from numpy import inf
from scipy.integrate import quad
def _a ( lowerCamelCase ):
if num <= 0:
raise ValueError("""math domain error""" )
return quad(lowerCamelCase, 0, lowerCamelCase, args=(lowerCamelCase) )[0]
def _a ( lowerCamelCase, lowerCamelCase ):
return math.pow(lowerCamelCase, z - 1 ) * math.exp(-x )
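# Hand-checked: the quadrature evaluates Gamma(num) = integral from 0 to inf of
# x ** (num - 1) * exp(-x) dx, and Gamma(5) = 4! = 24, so a call with num = 5
# should return approximately 24.0.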
if __name__ == "__main__":
from doctest import testmod
testmod()
| 287 |
def _a ( lowerCamelCase = 100_0000 ):
lowerCamelCase : Any = set(range(3, lowerCamelCase, 2 ) )
primes.add(2 )
for p in range(3, lowerCamelCase, 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p, lowerCamelCase, lowerCamelCase ) ) )
lowerCamelCase : Any = [float(lowerCamelCase ) for n in range(limit + 1 )]
for p in primes:
for n in range(lowerCamelCase, limit + 1, lowerCamelCase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 287 | 1 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , *__magic_name__ , **__magic_name__ ):
warnings.warn(
"""The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use VideoMAEImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 287 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _a ( lowerCamelCase ):
return "".join(sorted(lowerCamelCase ) )
def _a ( lowerCamelCase ):
return word_by_signature[signature(lowerCamelCase )]
_lowerCamelCase =Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
_lowerCamelCase =sorted({word.strip().lower() for word in data.splitlines()})
_lowerCamelCase =collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_lowerCamelCase ={word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
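# A hand-checked illustration (assuming both words appear in words.txt):
# signature("listen") == signature("silent") == "eilnst", so the two words land
# in the same word_by_signature bucket and anagram("listen") returns both.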
| 287 | 1 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
_lowerCamelCase =logging.getLogger(__name__)
_lowerCamelCase =list(MODEL_FOR_MASKED_LM_MAPPING.keys())
_lowerCamelCase =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class A__ :
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(__SCREAMING_SNAKE_CASE)} , )
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
_UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
_UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
_UpperCAmelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def UpperCamelCase__ ( self ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}
    )
    validation_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Defaults to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
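# Note: a ref file is expected to be line-aligned with its text file, each line holding a
# JSON list of token indices (e.g. "[2, 5, 6]") that mark sub-word pieces continuing a whole
# word -- an illustrative sketch; the exact semantics are consumed by
# `DataCollatorForWholeWordMask` below.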
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""", lowerCamelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]
    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid them being removed by the trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f" {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f" {key} = {value}")
                    writer.write(f"{key} = {value}\n")
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
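    # Example invocation (a sketch -- the script name, paths and flags are illustrative):
    #   python run_mlm_wwm.py --model_name_or_path bert-base-chinese \
    #       --train_file train.txt --train_ref_file train_ref.txt \
    #       --do_train --output_dir ./output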
| 287 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
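# Minimal usage sketch (the class name above is reconstructed; the input is a dummy image):
# processor = ConvNextImageProcessor(size={"shortest_edge": 384})
# batch = processor(images=np.zeros((64, 64, 3), dtype=np.uint8), return_tensors="np")
# batch["pixel_values"].shape  # -> (1, 3, 384, 384)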
| 287 | 1 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
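    # Example invocation (a sketch -- the script filename and paths are illustrative):
    #   python convert_ldm.py --checkpoint_path model.ckpt --config_path config.yaml \
    #       --output_path ./ldm_pipeline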
| 287 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
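# Every value above is deliberately non-default so the round-trip test further down
# (`test_config_common_kwargs_is_complete`) can tell a restored value apart from a default one.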
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 287 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 287 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 287 | 1 |
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
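
    # Quick example: 32 == 0b100000, whose highest set bit sits at 1-indexed position 6.
    assert get_highest_set_bit_position(32) == 6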
| 287 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
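# For example, every gzip stream starts with the two bytes b"\x1F\x8B", so
# GzipExtractor.is_extractable("data.json.gz") only needs to read the first few bytes
# of the file and match them against the subclass's `magic_numbers` list.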
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> Optional[str]:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
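

# Usage sketch (the archive path is illustrative): sniff the format once, then extract.
#   fmt = Extractor.infer_extractor_format("/tmp/data.tar")  # e.g. "tar"
#   if fmt:
#       Extractor.extract("/tmp/data.tar", "/tmp/extracted", extractor_format=fmt)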
| 287 | 1 |
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # private field; used as the default key when no key is passed to a method
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]
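    # Note: XOR is an involution (applying the same key twice restores the input),
    # which is why encrypt and decrypt above share the same implementation.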
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = 0 ):
assert isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ )
lowerCamelCase : Any = key or self.__key or 1
# make sure key can be any size
while key > 2_5_5:
key -= 2_5_5
# This will be returned
lowerCamelCase : int = """"""
for ch in content:
ans += chr(ord(__magic_name__ ) ^ key )
return ans
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = 0 ):
assert isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ )
lowerCamelCase : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 2_5_5:
key -= 2_5_5
# This will be returned
lowerCamelCase : Union[str, Any] = """"""
for ch in content:
ans += chr(ord(__magic_name__ ) ^ key )
return ans
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = 0 ):
assert isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ )
try:
with open(__magic_name__ ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(__magic_name__ , __magic_name__ ) )
except OSError:
return False
return True
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
assert isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ )
try:
with open(__magic_name__ ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(__magic_name__ , __magic_name__ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 287 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
_lowerCamelCase =5_0_0_0_0_0
_lowerCamelCase , _lowerCamelCase =os.path.split(__file__)
_lowerCamelCase =os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def _a ( lowerCamelCase, **lowerCamelCase ):
lowerCamelCase : Optional[Any] = dataset.map(**lowerCamelCase )
@get_duration
def _a ( lowerCamelCase, **lowerCamelCase ):
lowerCamelCase : Optional[Any] = dataset.filter(**lowerCamelCase )
def _a ( ):
lowerCamelCase : Optional[Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : Any = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
lowerCamelCase : Tuple = generate_example_dataset(
os.path.join(lowerCamelCase, """dataset.arrow""" ), lowerCamelCase, num_examples=lowerCamelCase )
lowerCamelCase : Tuple = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=lowerCamelCase )
def tokenize(lowerCamelCase ):
return tokenizer(examples["""text"""] )
lowerCamelCase : List[str] = map(lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, batched=lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""numpy""" ):
lowerCamelCase : Optional[int] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""pandas""" ):
lowerCamelCase : List[str] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""torch""", columns="""numbers""" ):
lowerCamelCase : List[str] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""tensorflow""", columns="""numbers""" ):
lowerCamelCase : Optional[int] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, function=lowerCamelCase, batched=lowerCamelCase )
lowerCamelCase : Union[str, Any] = filter(lowerCamelCase )
    # Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase, """wb""" ) as f:
f.write(json.dumps(lowerCamelCase ).encode("""utf-8""" ) )
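# For reference, a minimal sketch of what the `get_duration` decorator imported
# above might look like. This is an assumption about utils, not its actual code:
# it runs the wrapped call and returns the elapsed wall-clock time in seconds.
import time as _time

def _get_duration_sketch(func):
    def wrapper(*args, **kwargs):
        start = _time.time()
        func(*args, **kwargs)
        return _time.time() - start
    return wrapper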
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 287 | 1 |
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class A__ :
def __init__( self , __magic_name__ ):
if isinstance(__magic_name__ , __magic_name__ ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
lowerCamelCase : Any = deepcopy(__magic_name__ )
elif os.path.exists(__magic_name__ ):
with io.open(__magic_name__ , """r""" , encoding="""utf-8""" ) as f:
lowerCamelCase : Tuple = json.load(__magic_name__ )
else:
try:
lowerCamelCase : Tuple = baseaa.urlsafe_baadecode(__magic_name__ ).decode("""utf-8""" )
lowerCamelCase : Optional[int] = json.loads(__magic_name__ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
lowerCamelCase : Union[str, Any] = config
self.set_stage_and_offload()
def UpperCamelCase__ ( self ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
lowerCamelCase : str = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowerCamelCase : Union[str, Any] = False
if self.is_zeroa() or self.is_zeroa():
lowerCamelCase : Tuple = set(["""cpu""", """nvme"""] )
lowerCamelCase : List[Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowerCamelCase : Tuple = True
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : Optional[Any] = self.config
# find the config node of interest if it exists
lowerCamelCase : int = ds_key_long.split(""".""" )
lowerCamelCase : Optional[int] = nodes.pop()
for node in nodes:
lowerCamelCase : List[str] = config.get(__magic_name__ )
if config is None:
return None, ds_key
return config, ds_key
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=None ):
lowerCamelCase , lowerCamelCase : Optional[Any] = self.find_config_node(__magic_name__ )
if config is None:
return default
return config.get(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False ):
lowerCamelCase : int = self.config
# find the config node of interest if it exists
lowerCamelCase : Dict = ds_key_long.split(""".""" )
for node in nodes:
lowerCamelCase : Optional[Any] = config
lowerCamelCase : Dict = config.get(__magic_name__ )
if config is None:
if must_exist:
raise ValueError(F'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : Optional[int] = self.get_value(__magic_name__ )
return False if value is None else bool(__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : Tuple = self.get_value(__magic_name__ )
return False if value is None else not bool(__magic_name__ )
def UpperCamelCase__ ( self ):
return self._stage == 2
def UpperCamelCase__ ( self ):
return self._stage == 3
def UpperCamelCase__ ( self ):
return self._offload
class A__ :
def __init__( self , __magic_name__ ):
lowerCamelCase : str = engine
def UpperCamelCase__ ( self , __magic_name__ , **__magic_name__ ):
# runs backpropagation and handles mixed precision
self.engine.backward(__magic_name__ , **__magic_name__ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , __magic_name__ ):
super().__init__(__magic_name__ , device_placement=__magic_name__ , scaler=__magic_name__ )
lowerCamelCase : Dict = hasattr(self.optimizer , """overflow""" )
def UpperCamelCase__ ( self , __magic_name__=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def UpperCamelCase__ ( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def UpperCamelCase__ ( self ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , __magic_name__ , __magic_name__ ):
super().__init__(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class A__ :
def __init__( self , __magic_name__ , __magic_name__=0.001 , __magic_name__=0 , **__magic_name__ ):
lowerCamelCase : List[Any] = params
lowerCamelCase : Dict = lr
lowerCamelCase : Optional[int] = weight_decay
lowerCamelCase : int = kwargs
class A__ :
def __init__( self , __magic_name__ , __magic_name__=None , __magic_name__=0 , **__magic_name__ ):
lowerCamelCase : Dict = optimizer
lowerCamelCase : Dict = total_num_steps
lowerCamelCase : List[str] = warmup_num_steps
lowerCamelCase : str = kwargs
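# Illustration of the dotted-key traversal implemented by `find_config_node` /
# `get_value` in the config wrapper above, restated as a tiny standalone helper:
def _get_dotted(config: dict, ds_key_long: str, default=None):
    # Walk nested dicts one path segment at a time; fall back on a missing node.
    node = config
    for key in ds_key_long.split("."):
        if not isinstance(node, dict) or key not in node:
            return default
        node = node[key]
    return node

assert _get_dotted({"zero_optimization": {"stage": 3}}, "zero_optimization.stage") == 3
assert _get_dotted({}, "zero_optimization.stage", default=-1) == -1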
| 287 |
def _a ( lowerCamelCase ):
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
lowerCamelCase : Any = 4
lowerCamelCase : List[str] = (1 << p) - 1
for _ in range(p - 2 ):
lowerCamelCase : List[Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
    print(_a(7))
    print(_a(1_1))
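    # Worked example of the recurrence s -> (s * s - 2) % m with m = 2**p - 1:
    # for p = 5, m = 31 the sequence runs 4 -> 14 -> 8 -> 0, so 31 is prime,
    # while 2**11 - 1 = 2047 = 23 * 89 is composite, hence False for p = 11.
    assert _a(5) is True and _a(1_1) is False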
| 287 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class A__ :
def __init__( self , __magic_name__ ):
lowerCamelCase : str = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
lowerCamelCase : int = len(__magic_name__ ) - 1
def UpperCamelCase__ ( self , __magic_name__ ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowerCamelCase : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , __magic_name__ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(__magic_name__ ) , 5 ) == 1
return output_values
def UpperCamelCase__ ( self , __magic_name__ ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowerCamelCase : Optional[int] = self.basis_function(__magic_name__ )
lowerCamelCase : int = 0.0
lowerCamelCase : List[Any] = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def UpperCamelCase__ ( self , __magic_name__ = 0.01 ):
from matplotlib import pyplot as plt # type: ignore
lowerCamelCase : list[float] = [] # x coordinates of points to plot
lowerCamelCase : list[float] = [] # y coordinates of points to plot
lowerCamelCase : List[Any] = 0.0
while t <= 1:
lowerCamelCase : Union[str, Any] = self.bezier_curve_function(__magic_name__ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
lowerCamelCase : List[str] = [i[0] for i in self.list_of_points]
lowerCamelCase : Optional[Any] = [i[1] for i in self.list_of_points]
plt.plot(
__magic_name__ , __magic_name__ , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
plt.scatter(__magic_name__ , __magic_name__ , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
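    # Quick check of the Bernstein basis used above: for degree 2 at t = 0.5 the
    # weights C(2, i) * (1 - t) ** (2 - i) * t ** i are [0.25, 0.5, 0.25] and sum to 1.
    assert [comb(2, i) * 0.5 ** (2 - i) * 0.5**i for i in range(3)] == [0.25, 0.5, 0.25]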
| 287 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , *__magic_name__ , **__magic_name__ ):
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 287 | 1 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = (DDIMParallelScheduler,)
_UpperCAmelCase : Any = (("""eta""", 0.0), ("""num_inference_steps""", 50))
def UpperCamelCase__ ( self , **__magic_name__ ):
lowerCamelCase : Tuple = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""clip_sample""": True,
}
config.update(**__magic_name__ )
return config
def UpperCamelCase__ ( self , **__magic_name__ ):
lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase : Dict = self.get_scheduler_config(**__magic_name__ )
lowerCamelCase : Union[str, Any] = scheduler_class(**__magic_name__ )
lowerCamelCase , lowerCamelCase : Dict = 1_0, 0.0
lowerCamelCase : Any = self.dummy_model()
lowerCamelCase : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(__magic_name__ )
for t in scheduler.timesteps:
lowerCamelCase : List[Any] = model(__magic_name__ , __magic_name__ )
lowerCamelCase : List[str] = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ).prev_sample
return sample
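    # Note: the loop above is the standard sampling skeleton: at each timestep the
    # model predicts a residual and scheduler.step(...) maps it to the previous,
    # less-noisy sample.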
def UpperCamelCase__ ( self ):
for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__magic_name__ )
def UpperCamelCase__ ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__magic_name__ )
lowerCamelCase : Optional[int] = self.scheduler_classes[0]
lowerCamelCase : List[Any] = self.get_scheduler_config(steps_offset=1 )
lowerCamelCase : Optional[Any] = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) )
def UpperCamelCase__ ( self ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__magic_name__ , beta_end=__magic_name__ )
def UpperCamelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__magic_name__ )
def UpperCamelCase__ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__magic_name__ )
def UpperCamelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__magic_name__ )
def UpperCamelCase__ ( self ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__magic_name__ )
def UpperCamelCase__ ( self ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__magic_name__ )
def UpperCamelCase__ ( self ):
self.check_over_configs(thresholding=__magic_name__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__magic_name__ , prediction_type=__magic_name__ , sample_max_value=__magic_name__ , )
def UpperCamelCase__ ( self ):
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=__magic_name__ )
def UpperCamelCase__ ( self ):
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ):
self.check_over_forward(time_step=__magic_name__ , num_inference_steps=__magic_name__ )
def UpperCamelCase__ ( self ):
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__magic_name__ , eta=__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase : str = self.get_scheduler_config()
lowerCamelCase : Optional[int] = scheduler_class(**__magic_name__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
lowerCamelCase : Optional[int] = self.get_scheduler_config()
lowerCamelCase : List[Any] = scheduler_class(**__magic_name__ )
lowerCamelCase , lowerCamelCase : int = 1_0, 0.0
scheduler.set_timesteps(__magic_name__ )
lowerCamelCase : Any = self.dummy_model()
lowerCamelCase : Any = self.dummy_sample_deter
lowerCamelCase : int = self.dummy_sample_deter + 0.1
lowerCamelCase : Dict = self.dummy_sample_deter - 0.1
lowerCamelCase : Union[str, Any] = samplea.shape[0]
lowerCamelCase : Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
lowerCamelCase : Optional[Any] = torch.arange(__magic_name__ )[0:3, None].repeat(1 , __magic_name__ )
lowerCamelCase : str = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowerCamelCase : Union[str, Any] = scheduler.batch_step_no_noise(__magic_name__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __magic_name__ )
lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__magic_name__ ) )
lowerCamelCase : List[str] = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = self.full_loop()
lowerCamelCase : Optional[int] = torch.sum(torch.abs(__magic_name__ ) )
lowerCamelCase : List[Any] = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = self.full_loop(prediction_type="""v_prediction""" )
lowerCamelCase : Tuple = torch.sum(torch.abs(__magic_name__ ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def UpperCamelCase__ ( self ):
# We specify different beta, so that the first alpha is 0.99
lowerCamelCase : int = self.full_loop(set_alpha_to_one=__magic_name__ , beta_start=0.01 )
lowerCamelCase : Optional[int] = torch.sum(torch.abs(__magic_name__ ) )
lowerCamelCase : Optional[Any] = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def UpperCamelCase__ ( self ):
# We specify different beta, so that the first alpha is 0.99
lowerCamelCase : Union[str, Any] = self.full_loop(set_alpha_to_one=__magic_name__ , beta_start=0.01 )
lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__magic_name__ ) )
lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
| 287 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """segformer"""
def __init__( self , __magic_name__=3 , __magic_name__=4 , __magic_name__=[2, 2, 2, 2] , __magic_name__=[8, 4, 2, 1] , __magic_name__=[3_2, 6_4, 1_6_0, 2_5_6] , __magic_name__=[7, 3, 3, 3] , __magic_name__=[4, 2, 2, 2] , __magic_name__=[1, 2, 5, 8] , __magic_name__=[4, 4, 4, 4] , __magic_name__="gelu" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=0.02 , __magic_name__=0.1 , __magic_name__=1e-6 , __magic_name__=2_5_6 , __magic_name__=2_5_5 , **__magic_name__ , ):
super().__init__(**__magic_name__ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __magic_name__ , )
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : str = num_encoder_blocks
lowerCamelCase : Any = depths
lowerCamelCase : List[Any] = sr_ratios
lowerCamelCase : int = hidden_sizes
lowerCamelCase : Union[str, Any] = patch_sizes
lowerCamelCase : Optional[Any] = strides
lowerCamelCase : Dict = mlp_ratios
lowerCamelCase : str = num_attention_heads
lowerCamelCase : Any = hidden_act
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase : Dict = classifier_dropout_prob
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : Any = decoder_hidden_size
lowerCamelCase : str = kwargs.get("""reshape_last_stage""" , __magic_name__ )
lowerCamelCase : Dict = semantic_loss_ignore_index
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = version.parse("""1.11""")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-4
@property
def UpperCamelCase__ ( self ):
return 1_2
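# Note (informational): the list-valued arguments of the config above (depths,
# sr_ratios, hidden_sizes, patch_sizes, strides, num_attention_heads, mlp_ratios)
# are per-stage settings, so each is expected to hold num_encoder_blocks entries.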
| 287 | 1 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase =list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_lowerCamelCase =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class A__ :
_UpperCAmelCase : str = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Model type selected in the list: """ + """, """.join(__SCREAMING_SNAKE_CASE)})
_UpperCAmelCase : str = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""})
_UpperCAmelCase : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_UpperCAmelCase : int = field(
default=128 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
_UpperCAmelCase : int = field(
default=64 , metadata={
"""help""": (
"""The maximum number of tokens for the question. Questions longer than this will """
"""be truncated to this length."""
)
} , )
_UpperCAmelCase : int = field(
default=30 , metadata={
"""help""": (
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
)
} , )
_UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
_UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""})
_UpperCAmelCase : float = field(
default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""})
_UpperCAmelCase : int = field(
default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""})
_UpperCAmelCase : int = field(
default=0 , metadata={
"""help""": (
"""language id of input for language-specific xlm models (see"""
""" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
)
} , )
_UpperCAmelCase : int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""})
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = """train"""
_UpperCAmelCase : Any = """dev"""
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : SquadDataTrainingArguments
_UpperCAmelCase : List[SquadFeatures]
_UpperCAmelCase : Split
_UpperCAmelCase : bool
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = Split.train , __magic_name__ = False , __magic_name__ = None , __magic_name__ = "pt" , ):
lowerCamelCase : Optional[int] = args
lowerCamelCase : Tuple = is_language_sensitive
lowerCamelCase : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__magic_name__ , __magic_name__ ):
try:
lowerCamelCase : Tuple = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
lowerCamelCase : Dict = mode
# Load data features from cache or dataset file
lowerCamelCase : Tuple = """v2""" if args.version_2_with_negative else """v1"""
lowerCamelCase : List[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase : Any = cached_features_file + """.lock"""
with FileLock(__magic_name__ ):
if os.path.exists(__magic_name__ ) and not args.overwrite_cache:
lowerCamelCase : int = time.time()
lowerCamelCase : List[str] = torch.load(__magic_name__ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase : Dict = self.old_features["""features"""]
lowerCamelCase : List[Any] = self.old_features.get("""dataset""" , __magic_name__ )
lowerCamelCase : List[Any] = self.old_features.get("""examples""" , __magic_name__ )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
""" future run""" )
else:
if mode == Split.dev:
lowerCamelCase : int = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase : Optional[Any] = self.processor.get_train_examples(args.data_dir )
lowerCamelCase , lowerCamelCase : List[str] = squad_convert_examples_to_features(
examples=self.examples , tokenizer=__magic_name__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__magic_name__ , )
lowerCamelCase : List[Any] = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , __magic_name__ , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
return len(self.features )
def __getitem__( self , __magic_name__ ):
# Convert to Tensors and build dataset
lowerCamelCase : Tuple = self.features[i]
lowerCamelCase : Union[str, Any] = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase : Tuple = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase : Dict = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase : Any = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase : int = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase : Any = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase : Optional[int] = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase : List[Any] = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase : Dict = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
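# Condensed sketch of the lock-guarded caching pattern used in __init__ above:
# the first process to acquire the lock builds and saves the features, and every
# other process (or later run) loads the cached file instead. `build_fn` is an
# illustrative stand-in for the squad_convert_examples_to_features call.
def _cached_build_sketch(cache_path, build_fn, overwrite=False):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path) and not overwrite:
            return torch.load(cache_path)
        result = build_fn()
        torch.save(result, cache_path)
        return result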
| 287 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = """gpt_neo"""
_UpperCAmelCase : Union[str, Any] = ["""past_key_values"""]
_UpperCAmelCase : List[Any] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , __magic_name__=5_0_2_5_7 , __magic_name__=2_0_4_8 , __magic_name__=2_0_4_8 , __magic_name__=2_4 , __magic_name__=[[["global", "local"], 1_2]] , __magic_name__=1_6 , __magic_name__=None , __magic_name__=2_5_6 , __magic_name__="gelu_new" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , **__magic_name__ , ):
lowerCamelCase : List[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : str = hidden_size
lowerCamelCase : Optional[int] = num_layers
lowerCamelCase : str = num_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : List[Any] = window_size
lowerCamelCase : int = activation_function
lowerCamelCase : Union[str, Any] = resid_dropout
lowerCamelCase : List[Any] = embed_dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Dict = classifier_dropout
lowerCamelCase : Any = layer_norm_epsilon
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Dict = use_cache
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : int = eos_token_id
lowerCamelCase : List[Any] = attention_types
lowerCamelCase : Optional[Any] = self.expand_attention_types_params(__magic_name__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ ):
lowerCamelCase : Optional[int] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
import torch
lowerCamelCase : Any = input.size()
lowerCamelCase : List[Any] = len(lowerCamelCase )
lowerCamelCase : Optional[Any] = shape[dimension]
lowerCamelCase : Optional[int] = torch.arange(0, lowerCamelCase, lowerCamelCase )
lowerCamelCase : Dict = torch.div(sizedim - size, lowerCamelCase, rounding_mode="""floor""" ) + 1
lowerCamelCase : int = torch.arange(lowerCamelCase ) + low_indices[:min_length][:, None]
lowerCamelCase : str = [slice(lowerCamelCase )] * rank
lowerCamelCase : List[str] = indices
lowerCamelCase : Dict = input[s]
lowerCamelCase : Any = list(range(0, rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase ):
import torch
lowerCamelCase : List[Any] = torch.arange(1, lowerCamelCase )
lowerCamelCase : Optional[int] = torch.remainder(lowerCamelCase, lowerCamelCase )
lowerCamelCase : List[Any] = remainders == 0
lowerCamelCase : List[Any] = candidates[divisor_indices]
lowerCamelCase : Optional[Any] = torch.max(lowerCamelCase )
return largest_divisor, torch.div(lowerCamelCase, lowerCamelCase, rounding_mode="""floor""" )
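# Worked illustration of the sliding-window helper defined first above (note that
# both helpers were renamed `_a`, so only the divisor helper survives at runtime):
# for an input of shape (1, 8) with dimension=1, size=4, step=2 it yields shape
# (1, 3, 4) holding the windows [0:4], [2:6], [4:8], matching Tensor.unfold:
def _chunk_demo():
    import torch

    x = torch.arange(8).reshape(1, 8)
    windows = x.unfold(1, 4, 2)  # (dimension, size, step)
    assert windows.shape == (1, 3, 4)
    assert windows[0, 1].tolist() == [2, 3, 4, 5]
    return windows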
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
lowerCamelCase : str = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
lowerCamelCase : int = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase : Tuple = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase__ ( self ):
return self._config.num_heads
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ):
lowerCamelCase : Optional[int] = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
        # We need to order the inputs in the way they appear in the forward()
lowerCamelCase : int = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase , lowerCamelCase : Optional[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase : Optional[int] = seqlen + 2
lowerCamelCase : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase : str = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
lowerCamelCase : Tuple = common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase : str = ordered_inputs["""attention_mask"""].dtype
lowerCamelCase : Any = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase__ ( self ):
return 1_3
| 287 | 1 |
import pytest
_lowerCamelCase ="""__dummy_dataset1__"""
_lowerCamelCase ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Union[str, Any] = dataset_loading_script_name
lowerCamelCase : List[Any] = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase )
lowerCamelCase : Dict = script_dir / F'''{script_name}.py'''
with open(lowerCamelCase, """w""" ) as f:
f.write(lowerCamelCase )
return str(lowerCamelCase )
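# Hypothetical usage sketch (not part of the fixtures above): the directory path
# returned by the last fixture can be handed straight to datasets.load_dataset.
# def test_dummy_dataset(dataset_loading_script_dir):
#     import datasets
#     ds = datasets.load_dataset(dataset_loading_script_dir, split="train")
#     assert len(ds) > 0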
| 287 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase ={
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 287 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=True ):
model.train()
lowerCamelCase : Dict = model(lowerCamelCase )
lowerCamelCase : Any = F.mse_loss(lowerCamelCase, target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase=False ):
set_seed(42 )
lowerCamelCase : Tuple = RegressionModel()
lowerCamelCase : Any = deepcopy(lowerCamelCase )
lowerCamelCase : Any = RegressionDataset(length=80 )
lowerCamelCase : Dict = DataLoader(lowerCamelCase, batch_size=16 )
model.to(accelerator.device )
if sched:
lowerCamelCase : int = AdamW(params=model.parameters(), lr=1e-3 )
lowerCamelCase : Optional[Any] = AdamW(params=ddp_model.parameters(), lr=1e-3 )
        lowerCamelCase : str = LambdaLR(lowerCamelCase, lr_lambda=lambda lowerCamelCase : lowerCamelCase**0.6_5 )
        lowerCamelCase : Tuple = LambdaLR(lowerCamelCase, lr_lambda=lambda lowerCamelCase : lowerCamelCase**0.6_5 )
# Make a copy of `model`
if sched:
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = accelerator.prepare(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
lowerCamelCase , lowerCamelCase : List[Any] = accelerator.prepare(lowerCamelCase, lowerCamelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _a ( lowerCamelCase ):
# Test when on a single CPU or GPU that the context manager does nothing
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_training_setup(lowerCamelCase )
# Use a single batch
lowerCamelCase , lowerCamelCase : Union[str, Any] = next(iter(lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
# Sync grads
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad, ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : List[Any] = ddp_input[torch.randperm(len(lowerCamelCase ) )]
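# The pattern exercised above, in miniature (illustrative fragment, not runnable
# on its own): skip gradient all-reduce on accumulation steps and synchronize
# only on the step that will actually update the weights.
# if accumulating:
#     with accelerator.no_sync(ddp_model):
#         accelerator.backward(loss)
# else:
#     accelerator.backward(loss)  # gradients are all-reduced here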
def _a ( lowerCamelCase ):
# Test on distributed setup that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase : int = get_training_setup(lowerCamelCase )
# Use a single batch
lowerCamelCase , lowerCamelCase : Union[str, Any] = next(iter(lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Any = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
# Sync grads
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : Optional[Any] = ddp_input[torch.randperm(len(lowerCamelCase ) )]
def _a ( lowerCamelCase=False, lowerCamelCase=False ):
lowerCamelCase : Any = Accelerator(
split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = get_training_setup(lowerCamelCase )
for iteration, batch in enumerate(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : str = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : Any = ddp_input[torch.randperm(len(lowerCamelCase ) )]
GradientState._reset_state()
def _a ( lowerCamelCase=False, lowerCamelCase=False ):
lowerCamelCase : List[Any] = Accelerator(
split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_training_setup(lowerCamelCase, lowerCamelCase )
for iteration, batch in enumerate(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowerCamelCase : Union[str, Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def _a ( ):
lowerCamelCase : int = Accelerator()
lowerCamelCase : Optional[Any] = RegressionDataset(length=80 )
lowerCamelCase : List[str] = DataLoader(lowerCamelCase, batch_size=16 )
lowerCamelCase : int = RegressionDataset(length=96 )
lowerCamelCase : Optional[int] = DataLoader(lowerCamelCase, batch_size=16 )
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.prepare(lowerCamelCase, lowerCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase )
if iteration < len(lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase )
if batch_num < len(lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _a ( ):
lowerCamelCase : List[Any] = Accelerator()
lowerCamelCase : int = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(lowerCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(lowerCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation(lowerCamelCase, lowerCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""", """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, """`split_batches=False`, `dispatch_batches=False`**""", )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation_with_opt_and_scheduler(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 287 | 1 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
    Example 2-The same as Example 1, but also returning the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
if return_pvalue:
lowerCamelCase : Optional[Any] = pearsonr(__magic_name__ , __magic_name__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__magic_name__ , __magic_name__ )[0] )}
| 287 | 1 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Union[str, Any] = RoFormerTokenizer
_UpperCAmelCase : Optional[int] = RoFormerTokenizerFast
_UpperCAmelCase : Dict = True
_UpperCAmelCase : List[str] = True
def UpperCamelCase__ ( self ):
super().setUp()
def UpperCamelCase__ ( self , **__magic_name__ ):
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **__magic_name__ )
def UpperCamelCase__ ( self , **__magic_name__ ):
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """永和服装饰品有限公司,今天天气非常好"""
lowerCamelCase : str = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.get_tokenizer()
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.get_chinese_input_output_texts()
lowerCamelCase : int = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , output_text.split() )
lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
lowerCamelCase : List[Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.get_rust_tokenizer()
lowerCamelCase , lowerCamelCase : List[str] = self.get_chinese_input_output_texts()
lowerCamelCase : int = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , output_text.split() )
lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
lowerCamelCase : Union[str, Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
pass
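
# Behavior exercised above, in words: RoFormer's tokenizer first word-segments
# Chinese text with (r)jieba and then maps segments to ids, so
# "永和服装饰品有限公司,今天天气非常好" yields pieces such as "永和", "服装",
# "饰品" and "有限公司" before id conversion.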
| 287 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """conditional_detr"""
_UpperCAmelCase : Optional[int] = ["""past_key_values"""]
_UpperCAmelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __magic_name__=True , __magic_name__=None , __magic_name__=3 , __magic_name__=3_0_0 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=True , __magic_name__="relu" , __magic_name__=2_5_6 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=False , __magic_name__="sine" , __magic_name__="resnet50" , __magic_name__=True , __magic_name__=False , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=1 , __magic_name__=1 , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=0.25 , **__magic_name__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCamelCase : Dict = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase : str = config_class.from_dict(__magic_name__ )
lowerCamelCase : Dict = use_timm_backbone
lowerCamelCase : str = backbone_config
lowerCamelCase : Tuple = num_channels
lowerCamelCase : Dict = num_queries
lowerCamelCase : Any = d_model
lowerCamelCase : Optional[Any] = encoder_ffn_dim
lowerCamelCase : List[str] = encoder_layers
lowerCamelCase : Union[str, Any] = encoder_attention_heads
lowerCamelCase : Any = decoder_ffn_dim
lowerCamelCase : Dict = decoder_layers
lowerCamelCase : Union[str, Any] = decoder_attention_heads
lowerCamelCase : Dict = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Union[str, Any] = activation_dropout
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : int = init_std
lowerCamelCase : str = init_xavier_std
lowerCamelCase : Tuple = encoder_layerdrop
lowerCamelCase : str = decoder_layerdrop
lowerCamelCase : Tuple = encoder_layers
lowerCamelCase : Optional[int] = auxiliary_loss
lowerCamelCase : Optional[Any] = position_embedding_type
lowerCamelCase : Optional[int] = backbone
lowerCamelCase : Union[str, Any] = use_pretrained_backbone
lowerCamelCase : str = dilation
# Hungarian matcher
lowerCamelCase : Optional[Any] = class_cost
lowerCamelCase : Dict = bbox_cost
lowerCamelCase : Tuple = giou_cost
# Loss coefficients
lowerCamelCase : Union[str, Any] = mask_loss_coefficient
lowerCamelCase : Dict = dice_loss_coefficient
lowerCamelCase : Optional[int] = cls_loss_coefficient
lowerCamelCase : Optional[int] = bbox_loss_coefficient
lowerCamelCase : Optional[int] = giou_loss_coefficient
lowerCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
return self.d_model
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCamelCase : Optional[int] = self.backbone_config.to_dict()
lowerCamelCase : Optional[Any] = self.__class__.model_type
return output
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = version.parse("""1.11""")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-5
@property
def UpperCamelCase__ ( self ):
return 1_2
| 287 | 1 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Optional[Any] = nn.functional.normalize(lowerCamelCase )
lowerCamelCase : List[Any] = nn.functional.normalize(lowerCamelCase )
return torch.mm(lowerCamelCase, normalized_text_embeds.t() )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = CLIPConfig
_UpperCAmelCase : Any = ["""CLIPEncoderLayer"""]
def __init__( self , __magic_name__ ):
super().__init__(__magic_name__ )
lowerCamelCase : Dict = CLIPVisionModel(config.vision_config )
lowerCamelCase : int = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__magic_name__ )
lowerCamelCase : Optional[int] = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=__magic_name__ )
lowerCamelCase : str = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__magic_name__ )
lowerCamelCase : Tuple = nn.Parameter(torch.ones(1_7 ) , requires_grad=__magic_name__ )
lowerCamelCase : Optional[int] = nn.Parameter(torch.ones(3 ) , requires_grad=__magic_name__ )
@torch.no_grad()
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : List[str] = self.vision_model(__magic_name__ )[1] # pooled_output
lowerCamelCase : List[str] = self.visual_projection(__magic_name__ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase : Tuple = cosine_distance(__magic_name__ , self.special_care_embeds ).cpu().float().numpy()
lowerCamelCase : Union[str, Any] = cosine_distance(__magic_name__ , self.concept_embeds ).cpu().float().numpy()
lowerCamelCase : List[str] = []
lowerCamelCase : Any = image_embeds.shape[0]
for i in range(__magic_name__ ):
lowerCamelCase : Optional[Any] = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCamelCase : Tuple = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
lowerCamelCase : Tuple = special_cos_dist[i][concept_idx]
lowerCamelCase : Any = self.special_care_embeds_weights[concept_idx].item()
lowerCamelCase : List[Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
lowerCamelCase : Optional[int] = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
lowerCamelCase : Optional[Any] = cos_dist[i][concept_idx]
lowerCamelCase : Optional[Any] = self.concept_embeds_weights[concept_idx].item()
lowerCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(__magic_name__ )
result.append(__magic_name__ )
lowerCamelCase : Union[str, Any] = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : List[str] = self.vision_model(__magic_name__ )[1] # pooled_output
lowerCamelCase : str = self.visual_projection(__magic_name__ )
lowerCamelCase : str = cosine_distance(__magic_name__ , self.special_care_embeds )
lowerCamelCase : Optional[int] = cosine_distance(__magic_name__ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCamelCase : Any = 0.0
lowerCamelCase : Any = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
lowerCamelCase : int = torch.any(special_scores > 0 , dim=1 )
lowerCamelCase : Any = special_care * 0.01
lowerCamelCase : Dict = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
lowerCamelCase : int = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
lowerCamelCase : str = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
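

# Minimal sanity sketch (shapes only; no checkpoint is loaded). The cosine
# helper at the top of this file is plain cosine similarity between
# row-normalized embeddings, so every vector scores 1.0 against itself:
#
#     embeds = torch.randn(2, 4)
#     sim = cosine_distance(embeds, embeds)  # name as invoked in the class above
#     assert torch.allclose(torch.diagonal(sim), torch.ones(2), atol=1e-6)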
| 287 |
import json
import sys
def _a ( lowerCamelCase, lowerCamelCase ):
with open(lowerCamelCase, encoding="""utf-8""" ) as f:
lowerCamelCase : List[Any] = json.load(lowerCamelCase )
lowerCamelCase : Optional[Any] = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(lowerCamelCase ):
lowerCamelCase : List[Any] = results[benchmark_name]
lowerCamelCase : Union[str, Any] = benchmark_name.split("""/""" )[-1]
output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
lowerCamelCase : Any = """| metric |"""
lowerCamelCase : str = """|--------|"""
lowerCamelCase : List[Any] = """| new / old (diff) |"""
for metric_name in sorted(lowerCamelCase ):
lowerCamelCase : List[Any] = benchmark_res[metric_name]
lowerCamelCase : Tuple = metric_vals["""new"""]
lowerCamelCase : int = metric_vals.get("""old""", lowerCamelCase )
lowerCamelCase : Dict = metric_vals.get("""diff""", lowerCamelCase )
lowerCamelCase : Dict = F''' {new_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else """None"""
if old_val is not None:
val_str += F''' / {old_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else "None"
if dif_val is not None:
val_str += F''' ({dif_val:f})''' if isinstance(lowerCamelCase, (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(lowerCamelCase, """w""", encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(lowerCamelCase ) )
if __name__ == "__main__":
_lowerCamelCase =sys.argv[1]
_lowerCamelCase =sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
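
# Illustrative input shape for the formatter above (hypothetical names and
# numbers; only the nesting matters to the parser):
#
#     {
#         "benchmarks/text_generation.json": {
#             "tokens_per_second": {"new": 105.2, "old": 98.7, "diff": 6.5}
#         }
#     }
#
# which renders as one "### Benchmark:" section containing a one-row metric table.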
| 287 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class A__ :
def __init__( self , __magic_name__ , __magic_name__=1_3 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=9_9 , __magic_name__=3_2 , __magic_name__=5 , __magic_name__=4 , __magic_name__=3_7 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=1_6 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=4 , __magic_name__=None , ):
lowerCamelCase : List[str] = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : Tuple = seq_length
lowerCamelCase : Union[str, Any] = is_training
lowerCamelCase : List[Any] = use_token_type_ids
lowerCamelCase : List[Any] = use_labels
lowerCamelCase : int = vocab_size
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : Tuple = num_hidden_layers
lowerCamelCase : Tuple = num_attention_heads
lowerCamelCase : str = intermediate_size
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : List[Any] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Dict = max_position_embeddings
lowerCamelCase : List[Any] = type_vocab_size
lowerCamelCase : str = type_sequence_label_size
lowerCamelCase : str = initializer_range
lowerCamelCase : Tuple = num_labels
lowerCamelCase : List[str] = num_choices
lowerCamelCase : Optional[int] = scope
lowerCamelCase : Any = self.vocab_size - 1
def UpperCamelCase__ ( self ):
lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : int = None
if self.use_token_type_ids:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase : str = None
lowerCamelCase : Any = None
lowerCamelCase : Optional[int] = None
if self.use_labels:
lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : str = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase : List[Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , *__magic_name__ ):
lowerCamelCase : Optional[Any] = OpenAIGPTModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : List[Any] = model(__magic_name__ , token_type_ids=__magic_name__ , head_mask=__magic_name__ )
lowerCamelCase : str = model(__magic_name__ , token_type_ids=__magic_name__ )
lowerCamelCase : List[str] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , *__magic_name__ ):
lowerCamelCase : Optional[int] = OpenAIGPTLMHeadModel(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : int = model(__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , *__magic_name__ ):
lowerCamelCase : Union[str, Any] = OpenAIGPTDoubleHeadsModel(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Tuple = model(__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , *__magic_name__ ):
lowerCamelCase : Optional[int] = self.num_labels
lowerCamelCase : Dict = OpenAIGPTForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[Any] = model(__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = self.prepare_config_and_inputs()
(
(
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) ,
) : Optional[Any] = config_and_inputs
lowerCamelCase : Optional[int] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Union[str, Any] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_UpperCAmelCase : List[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_UpperCAmelCase : List[Any] = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
lowerCamelCase : Optional[int] = super()._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__magic_name__ , )
lowerCamelCase : Dict = inputs_dict["""labels"""]
lowerCamelCase : int = inputs_dict["""labels"""]
lowerCamelCase : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__magic_name__ , )
lowerCamelCase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = OpenAIGPTModelTester(self )
lowerCamelCase : int = ConfigTester(self , config_class=__magic_name__ , n_embd=3_7 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Dict = OpenAIGPTModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(__magic_name__ )
lowerCamelCase : Any = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=__magic_name__ ) # the president is
lowerCamelCase : int = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase : Union[str, Any] = model.generate(__magic_name__ , do_sample=__magic_name__ )
self.assertListEqual(output_ids[0].tolist() , __magic_name__ )
| 287 |
def _a ( lowerCamelCase ):
return " ".join(
"""""".join(word[::-1] ) if len(lowerCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 287 | 1 |
from math import isclose, sqrt
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[str] = point_y / 4 / point_x
lowerCamelCase : List[Any] = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
lowerCamelCase : Union[str, Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
lowerCamelCase : Optional[Any] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
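    # Derivation sketch: for the ellipse 4x^2 + y^2 = 100, implicit
    # differentiation gives a tangent slope of -4x/y, hence a normal slope of
    # y/(4x) (`normal_gradient` above); (ca, sa) are cos/sin of twice the
    # normal's angle (tangent half-angle identities), and the quotient above
    # reflects the incoming gradient about that normal.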
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
lowerCamelCase : Optional[int] = outgoing_gradient**2 + 4
lowerCamelCase : List[Any] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
lowerCamelCase : List[str] = (point_y - outgoing_gradient * point_x) ** 2 - 100
lowerCamelCase : List[str] = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
lowerCamelCase : Tuple = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
lowerCamelCase : str = x_minus if isclose(lowerCamelCase, lowerCamelCase ) else x_plus
lowerCamelCase : Union[str, Any] = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def _a ( lowerCamelCase = 1.4, lowerCamelCase = -9.6 ):
lowerCamelCase : int = 0
lowerCamelCase : float = first_x_coord
lowerCamelCase : float = first_y_coord
lowerCamelCase : float = (1_0.1 - point_y) / (0.0 - point_x)
while not (-0.0_1 <= point_x <= 0.0_1 and point_y > 0):
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = next_point(lowerCamelCase, lowerCamelCase, lowerCamelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
| 287 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def _a ( lowerCamelCase ):
if version.parse(lowerCamelCase ) < version.parse(lowerCamelCase ):
if "dev" in min_version:
lowerCamelCase : Optional[int] = (
"""This example requires a source install from HuggingFace Transformers (see """
"""`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
)
else:
lowerCamelCase : int = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
"""versions of HuggingFace Transformers.""" )
| 287 | 1 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowerCamelCase =data_utils.TransfoXLTokenizer
_lowerCamelCase =data_utils.TransfoXLCorpus
_lowerCamelCase =data_utils
_lowerCamelCase =data_utils
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(lowerCamelCase, """rb""" ) as fp:
lowerCamelCase : Any = pickle.load(lowerCamelCase, encoding="""latin1""" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
lowerCamelCase : List[str] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
lowerCamelCase : Optional[Any] = corpus.vocab.__dict__
torch.save(lowerCamelCase, lowerCamelCase )
lowerCamelCase : List[Any] = corpus.__dict__
corpus_dict_no_vocab.pop("""vocab""", lowerCamelCase )
lowerCamelCase : Union[str, Any] = pytorch_dump_folder_path + """/""" + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(lowerCamelCase, lowerCamelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
lowerCamelCase : int = os.path.abspath(lowerCamelCase )
lowerCamelCase : int = os.path.abspath(lowerCamelCase )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
lowerCamelCase : List[str] = TransfoXLConfig()
else:
lowerCamelCase : Optional[int] = TransfoXLConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase : Dict = TransfoXLLMHeadModel(lowerCamelCase )
lowerCamelCase : Union[str, Any] = load_tf_weights_in_transfo_xl(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Save pytorch-model
lowerCamelCase : Optional[int] = os.path.join(lowerCamelCase, lowerCamelCase )
lowerCamelCase : Any = os.path.join(lowerCamelCase, lowerCamelCase )
print(F'''Save PyTorch model to {os.path.abspath(lowerCamelCase )}''' )
torch.save(model.state_dict(), lowerCamelCase )
print(F'''Save configuration file to {os.path.abspath(lowerCamelCase )}''' )
with open(lowerCamelCase, """w""", encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
_lowerCamelCase =parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
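
# Illustrative invocation (script name per the upstream repository; directory
# and checkpoint paths are hypothetical):
#
#     python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#         --pytorch_dump_folder_path ./transfo-xl-pt \
#         --tf_checkpoint_path ./tf_ckpt/model.ckpt \
#         --transfo_xl_config_file ./tf_ckpt/config.json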
| 287 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = """camembert"""
def __init__( self , __magic_name__=3_0_5_2_2 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ):
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : int = num_attention_heads
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : int = layer_norm_eps
lowerCamelCase : Any = position_embedding_type
lowerCamelCase : Optional[int] = use_cache
lowerCamelCase : Union[str, Any] = classifier_dropout
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
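

# Minimal sketch: the dynamic ONNX axes above depend on the task (the names
# `CamembertConfig` / `CamembertOnnxConfig` follow the real library; this file
# anonymizes both classes as `A__`):
#
#     config = CamembertConfig()
#     onnx_config = CamembertOnnxConfig(config, task="multiple-choice")
#     print(onnx_config.inputs)  # input_ids / attention_mask, three dynamic axes each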
| 287 | 1 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def _a ( lowerCamelCase = "" ):
lowerCamelCase : Any = url or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
lowerCamelCase : str = BeautifulSoup(requests.get(lowerCamelCase ).text, """html.parser""" )
lowerCamelCase : Tuple = soup.find_all("""td""", attrs="""titleColumn""" )
lowerCamelCase : Any = soup.find_all("""td""", class_="""ratingColumn imdbRating""" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(lowerCamelCase, lowerCamelCase )
}
def _a ( lowerCamelCase = "IMDb_Top_250_Movies.csv" ):
lowerCamelCase : Dict = get_imdb_top_aaa_movies()
with open(lowerCamelCase, """w""", newline="""""" ) as out_file:
lowerCamelCase : Any = csv.writer(lowerCamelCase )
writer.writerow(["""Movie title""", """IMDb rating"""] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 287 |
_lowerCamelCase ={
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0_2_1_7_6_6_3_4E-1_9,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355818,
}
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
lowerCamelCase : Dict = (
F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
F'''Valid values are: {", ".join(lowerCamelCase )}'''
)
raise ValueError(lowerCamelCase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
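
# Worked examples for the converter above (this file binds it to the
# placeholder `_a`; upstream names it `energy_conversion`):
#
#     energy_conversion("kilowatthour", "joule", 1.0)     # 1.0 * 3_600_000 / 1.0 -> 3600000.0
#     energy_conversion("joule", "calorie_nutr", 4186.8)  # 4186.8 * 1.0 / 4186.8 -> 1.0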
| 287 | 1 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : List[Any] = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-canny""" , from_pt=__magic_name__ , dtype=jnp.bfloataa )
lowerCamelCase , lowerCamelCase : Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=__magic_name__ , from_pt=__magic_name__ , dtype=jnp.bfloataa )
lowerCamelCase : List[Any] = controlnet_params
lowerCamelCase : Tuple = """bird"""
lowerCamelCase : int = jax.device_count()
lowerCamelCase : Union[str, Any] = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
lowerCamelCase : int = pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCamelCase : Tuple = jax.random.PRNGKey(0 )
lowerCamelCase : Union[str, Any] = jax.random.split(__magic_name__ , jax.device_count() )
lowerCamelCase : Any = replicate(__magic_name__ )
lowerCamelCase : int = shard(__magic_name__ )
lowerCamelCase : Any = shard(__magic_name__ )
lowerCamelCase : Union[str, Any] = pipe(
prompt_ids=__magic_name__ , image=__magic_name__ , params=__magic_name__ , prng_seed=__magic_name__ , num_inference_steps=5_0 , jit=__magic_name__ , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowerCamelCase : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase : List[Any] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowerCamelCase : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase : int = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : str = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-openpose""" , from_pt=__magic_name__ , dtype=jnp.bfloataa )
lowerCamelCase , lowerCamelCase : Tuple = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=__magic_name__ , from_pt=__magic_name__ , dtype=jnp.bfloataa )
lowerCamelCase : Tuple = controlnet_params
lowerCamelCase : List[Any] = """Chef in the kitchen"""
lowerCamelCase : Any = jax.device_count()
lowerCamelCase : Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
lowerCamelCase : Union[str, Any] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowerCamelCase : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase : List[Any] = jax.random.split(__magic_name__ , jax.device_count() )
lowerCamelCase : Union[str, Any] = replicate(__magic_name__ )
lowerCamelCase : int = shard(__magic_name__ )
lowerCamelCase : Union[str, Any] = shard(__magic_name__ )
lowerCamelCase : int = pipe(
prompt_ids=__magic_name__ , image=__magic_name__ , params=__magic_name__ , prng_seed=__magic_name__ , num_inference_steps=5_0 , jit=__magic_name__ , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowerCamelCase : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase : Optional[Any] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowerCamelCase : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase : List[Any] = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 287 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_UpperCAmelCase : ClassVar[Features] = Features({"""audio""": Audio()})
_UpperCAmelCase : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
_UpperCAmelCase : str = "audio"
_UpperCAmelCase : str = "transcription"
def UpperCamelCase__ ( self , __magic_name__ ):
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] , __magic_name__ ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
lowerCamelCase : Optional[Any] = copy.deepcopy(self )
lowerCamelCase : List[Any] = self.input_schema.copy()
lowerCamelCase : Tuple = features[self.audio_column]
lowerCamelCase : int = input_schema
return task_template
@property
def UpperCamelCase__ ( self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
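
# Usage sketch for the alignment method above (anonymized as `UpperCamelCase__`;
# upstream exposes it as `align_with_features`): given a dataset's `Features`,
# it returns a copy of the template whose input schema carries that dataset's
# `Audio` type, e.g. `Audio(sampling_rate=16_000)` replacing the default
# `Audio()`.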
| 287 | 1 |
from __future__ import annotations
import math
_lowerCamelCase ="""2020.9.26"""
_lowerCamelCase ="""xcodz-dot, cclaus, dhruvmanila"""
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
if not all(isinstance(lowerCamelCase, (float, int) ) for val in locals().values() ):
lowerCamelCase : Optional[int] = F'''Input values must either be float or int: {list(locals().values() )}'''
raise TypeError(lowerCamelCase )
lowerCamelCase : str = ((x * distance) / (z + distance)) * scale
lowerCamelCase : Optional[Any] = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
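
# Worked check for `convert_to_ad`: with distance=10.0 and scale=10.0 the point
# (1.0, 2.0, 3.0) projects to ((1*10)/(3+10)*10, (2*10)/(3+10)*10), i.e.
# approximately (7.6923, 15.3846) -- matching the demo print at the bottom of
# this file.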
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
if not isinstance(lowerCamelCase, lowerCamelCase ):
raise TypeError("""Axis must be a str""" )
lowerCamelCase : List[Any] = locals()
del input_variables["axis"]
if not all(isinstance(lowerCamelCase, (float, int) ) for val in input_variables.values() ):
lowerCamelCase : Dict = (
"""Input values except axis must either be float or int: """
F'''{list(input_variables.values() )}'''
)
raise TypeError(lowerCamelCase )
lowerCamelCase : str = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
lowerCamelCase : List[Any] = x * math.cos(lowerCamelCase ) - y * math.sin(lowerCamelCase )
lowerCamelCase : str = y * math.cos(lowerCamelCase ) + x * math.sin(lowerCamelCase )
lowerCamelCase : Any = z
elif axis == "x":
lowerCamelCase : Optional[int] = y * math.cos(lowerCamelCase ) - z * math.sin(lowerCamelCase )
lowerCamelCase : str = z * math.cos(lowerCamelCase ) + y * math.sin(lowerCamelCase )
lowerCamelCase : List[str] = x
elif axis == "y":
lowerCamelCase : List[Any] = x * math.cos(lowerCamelCase ) - z * math.sin(lowerCamelCase )
lowerCamelCase : Union[str, Any] = z * math.cos(lowerCamelCase ) + x * math.sin(lowerCamelCase )
lowerCamelCase : List[Any] = y
else:
raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""" )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''')
print(f'''{rotate(1.0, 2.0, 3.0, "y", 90.0) = }''')
| 287 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower_vision_model"""
def __init__( self , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=3 , __magic_name__=1_6 , __magic_name__=2_8_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__=True , __magic_name__=False , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : str = num_hidden_layers
lowerCamelCase : Optional[int] = num_channels
lowerCamelCase : List[str] = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Any = initializer_factor
lowerCamelCase : Tuple = layer_norm_eps
lowerCamelCase : Tuple = stop_gradient
lowerCamelCase : Optional[int] = share_layernorm
lowerCamelCase : str = remove_last_layer
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowerCamelCase : str = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """bridgetower_text_model"""
def __init__( self , __magic_name__=5_0_2_6_5 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=1 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_4 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Any = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : Tuple = hidden_act
lowerCamelCase : Optional[int] = initializer_factor
lowerCamelCase : Any = intermediate_size
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Union[str, Any] = type_vocab_size
lowerCamelCase : Optional[int] = layer_norm_eps
lowerCamelCase : Optional[int] = position_embedding_type
lowerCamelCase : List[str] = use_cache
lowerCamelCase : List[str] = pad_token_id
lowerCamelCase : List[str] = bos_token_id
lowerCamelCase : Optional[int] = eos_token_id
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowerCamelCase : Optional[int] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower"""
def __init__( self , __magic_name__=True , __magic_name__="gelu" , __magic_name__=7_6_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__="add" , __magic_name__=1_2 , __magic_name__=6 , __magic_name__=False , __magic_name__=False , __magic_name__=None , __magic_name__=None , **__magic_name__ , ):
# TODO: remove this once the Hub files are updated.
lowerCamelCase : int = kwargs.pop("""text_config_dict""" , __magic_name__ )
lowerCamelCase : str = kwargs.pop("""vision_config_dict""" , __magic_name__ )
super().__init__(**__magic_name__ )
lowerCamelCase : str = share_cross_modal_transformer_layers
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : str = hidden_size
lowerCamelCase : Tuple = initializer_factor
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : int = share_link_tower_layers
lowerCamelCase : List[Any] = link_tower_type
lowerCamelCase : Tuple = num_attention_heads
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : Union[str, Any] = tie_word_embeddings
lowerCamelCase : Tuple = init_layernorm_from_vision_encoder
if text_config is None:
lowerCamelCase : Any = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
lowerCamelCase : int = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
lowerCamelCase : Any = BridgeTowerTextConfig(**__magic_name__ )
lowerCamelCase : Optional[Any] = BridgeTowerVisionConfig(**__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , **__magic_name__ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = copy.deepcopy(self.__dict__ )
lowerCamelCase : int = self.text_config.to_dict()
lowerCamelCase : Dict = self.vision_config.to_dict()
lowerCamelCase : List[str] = self.__class__.model_type
return output
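

# Minimal composition sketch using the sub-config classes referenced above (the
# combined class is anonymized as `A__`; upstream names it `BridgeTowerConfig`
# and the classmethod `from_text_vision_configs`):
#
#     text_cfg = BridgeTowerTextConfig()
#     vision_cfg = BridgeTowerVisionConfig()
#     cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     assert cfg.to_dict()["text_config"]["hidden_size"] == 768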
| 287 | 1 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def _a ( lowerCamelCase, lowerCamelCase=(), lowerCamelCase=None, lowerCamelCase="no", lowerCamelCase="29500" ):
lowerCamelCase : Tuple = False
lowerCamelCase : List[str] = False
if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
lowerCamelCase : str = True
elif "IPython" in sys.modules:
lowerCamelCase : str = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
try:
lowerCamelCase : List[str] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""", lowerCamelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if num_processes is None:
lowerCamelCase : Tuple = 8
lowerCamelCase : List[str] = PrepareForLaunch(lowerCamelCase, distributed_type="""TPU""" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(lowerCamelCase, args=lowerCamelCase, nprocs=lowerCamelCase, start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
function(*lowerCamelCase )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=lowerCamelCase, master_addr="""127.0.0.1""", master_port=lowerCamelCase, mixed_precision=lowerCamelCase ):
lowerCamelCase : Any = PrepareForLaunch(lowerCamelCase, distributed_type="""MULTI_GPU""" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(lowerCamelCase, args=lowerCamelCase, nprocs=lowerCamelCase, start_method="""fork""" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
lowerCamelCase : List[Any] = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
function(*lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase=(), lowerCamelCase=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=lowerCamelCase, master_addr="""127.0.0.1""", master_port="""29500""", accelerate_mixed_precision="""no""", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="""yes""", ):
lowerCamelCase : Optional[int] = PrepareForLaunch(lowerCamelCase, debug=lowerCamelCase )
start_processes(lowerCamelCase, args=lowerCamelCase, nprocs=lowerCamelCase, start_method="""fork""" )
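# Minimal self-contained sketch of the environment-patching idea used above. This is
# an illustrative stand-in written as an assumption, not accelerate's actual
# `patch_environment` implementation.
import contextlib

@contextlib.contextmanager
def _patched_environment(**kwargs):
    # torch.distributed reads upper-cased variables such as WORLD_SIZE and MASTER_ADDR.
    previous = {k.upper(): os.environ.get(k.upper()) for k in kwargs}
    os.environ.update({k.upper(): str(v) for k, v in kwargs.items()})
    try:
        yield
    finally:
        # Restore pre-existing values and drop the keys this context introduced.
        for key, value in previous.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value

# Example: with _patched_environment(master_addr="127.0.0.1", master_port="29500"): ...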
| 287 |
def _a ( lowerCamelCase = 100_0000 ):
lowerCamelCase : Any = set(range(3, lowerCamelCase, 2 ) )
primes.add(2 )
for p in range(3, lowerCamelCase, 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p, lowerCamelCase, lowerCamelCase ) ) )
lowerCamelCase : Any = [float(lowerCamelCase ) for n in range(limit + 1 )]
for p in primes:
for n in range(lowerCamelCase, limit + 1, lowerCamelCase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
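# Cross-check sketch (an addition, not part of the original solution): a naive
# O(limit^2) count of reduced proper fractions n/d with 0 < n < d <= limit, which is
# what the totient sieve above is intended to compute (Project Euler 72).
def _totient_sum_naive(limit):
    from math import gcd
    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)

# For example, _totient_sum_naive(8) == 21.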
| 287 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCamelCase =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Any = state_dict.pop(lowerCamelCase )
lowerCamelCase : List[Any] = val
def _a ( lowerCamelCase ):
lowerCamelCase : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowerCamelCase : Dict = key.replace("""backbone.0.body""", """backbone.conv_encoder.model""" )
lowerCamelCase : List[str] = value
else:
lowerCamelCase : Tuple = value
return new_state_dict
def _a ( lowerCamelCase, lowerCamelCase=False ):
lowerCamelCase : List[str] = """"""
if is_panoptic:
lowerCamelCase : List[str] = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowerCamelCase : int = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
lowerCamelCase : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : Tuple = in_proj_weight[:256, :]
lowerCamelCase : Union[str, Any] = in_proj_bias[:256]
lowerCamelCase : int = in_proj_weight[256:512, :]
lowerCamelCase : Dict = in_proj_bias[256:512]
lowerCamelCase : Optional[Any] = in_proj_weight[-256:, :]
lowerCamelCase : str = in_proj_bias[-256:]
def _a ( ):
lowerCamelCase : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : Optional[int] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Any = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowerCamelCase : int = """resnet101"""
if "dc5" in model_name:
lowerCamelCase : Tuple = True
lowerCamelCase : int = """panoptic""" in model_name
if is_panoptic:
lowerCamelCase : int = 250
else:
lowerCamelCase : Optional[int] = 91
lowerCamelCase : int = """huggingface/label-files"""
lowerCamelCase : Optional[Any] = """coco-detection-id2label.json"""
lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase, repo_type="""dataset""" ), """r""" ) )
lowerCamelCase : Optional[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase : Optional[Any] = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
# load image processor
lowerCamelCase : int = """coco_panoptic""" if is_panoptic else """coco_detection"""
lowerCamelCase : Optional[int] = ConditionalDetrImageProcessor(format=lowerCamelCase )
# prepare image
lowerCamelCase : str = prepare_img()
lowerCamelCase : List[Any] = image_processor(images=lowerCamelCase, return_tensors="""pt""" )
lowerCamelCase : int = encoding["""pixel_values"""]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
lowerCamelCase : Optional[int] = torch.hub.load("""DeppMeng/ConditionalDETR""", lowerCamelCase, pretrained=lowerCamelCase ).eval()
lowerCamelCase : Optional[int] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowerCamelCase : Dict = """conditional_detr.""" + src
rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowerCamelCase : str = rename_backbone_keys(lowerCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase, is_panoptic=lowerCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowerCamelCase : Dict = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
lowerCamelCase : List[str] = state_dict.pop(lowerCamelCase )
lowerCamelCase : Optional[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowerCamelCase : Any = state_dict.pop(lowerCamelCase )
lowerCamelCase : str = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
lowerCamelCase : str = state_dict.pop(lowerCamelCase )
lowerCamelCase : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
lowerCamelCase : int = state_dict.pop(lowerCamelCase )
lowerCamelCase : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
lowerCamelCase : str = ConditionalDetrForSegmentation(lowerCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
model.push_to_hub(repo_id=lowerCamelCase, organization="""DepuMeng""", commit_message="""Add model""" )
# verify our conversion
lowerCamelCase : Dict = conditional_detr(lowerCamelCase )
lowerCamelCase : Union[str, Any] = model(lowerCamelCase )
assert torch.allclose(outputs.logits, original_outputs["""pred_logits"""], atol=1e-4 )
assert torch.allclose(outputs.pred_boxes, original_outputs["""pred_boxes"""], atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks, original_outputs["""pred_masks"""], atol=1e-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
image_processor.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_lowerCamelCase =parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
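# Self-contained sketch of the slicing done in read_in_q_k_v above: PyTorch's
# MultiheadAttention stores the q/k/v projections as one stacked (3*d, d) matrix,
# and the conversion splits it into three (d, d) blocks (d = 256 here).
d = 256
in_proj_weight = torch.randn(3 * d, d)  # torch is imported at the top of this script
q_w = in_proj_weight[:d, :]
k_w = in_proj_weight[d : 2 * d, :]
v_w = in_proj_weight[-d:, :]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)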
| 287 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _a ( lowerCamelCase ):
return "".join(sorted(lowerCamelCase ) )
def _a ( lowerCamelCase ):
return word_by_signature[signature(lowerCamelCase )]
_lowerCamelCase =Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
_lowerCamelCase =sorted({word.strip().lower() for word in data.splitlines()})
_lowerCamelCase =collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_lowerCamelCase ={word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
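# Quick illustration (a standalone check, not part of the original script): anagrams
# share the same sorted-letter signature, which is why the defaultdict buckets above
# group them correctly.
assert "".join(sorted("listen")) == "".join(sorted("silent")) == "eilnst"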
| 287 | 1 |
import random
class A__ :
@staticmethod
def UpperCamelCase__ ( __magic_name__ ):
lowerCamelCase : Any = [ord(__magic_name__ ) for i in text]
lowerCamelCase : List[Any] = []
lowerCamelCase : str = []
for i in plain:
lowerCamelCase : List[str] = random.randint(1 , 3_0_0 )
lowerCamelCase : str = (i + k) * k
cipher.append(__magic_name__ )
key.append(__magic_name__ )
return cipher, key
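# Note for the decrypt method below: encryption stores cipher = (p + k) * k, so
# (cipher - k**2) / k recovers the original code point p exactly.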
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
lowerCamelCase : Optional[Any] = []
for i in range(len(__magic_name__ ) ):
lowerCamelCase : List[str] = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(__magic_name__ ) )
return "".join(__magic_name__ )
if __name__ == "__main__":
_lowerCamelCase , _lowerCamelCase =Onepad().encrypt("""Hello""")
print(c, k)
print(Onepad().decrypt(c, k))
| 287 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""pixel_values"""]
def __init__( self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = PILImageResampling.BILINEAR , __magic_name__ = True , __magic_name__ = 1 / 2_5_5 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = size if size is not None else {"""shortest_edge""": 3_8_4}
lowerCamelCase : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Dict = do_resize
lowerCamelCase : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCamelCase : Union[str, Any] = resample
lowerCamelCase : str = do_rescale
lowerCamelCase : Union[str, Any] = rescale_factor
lowerCamelCase : Tuple = do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowerCamelCase : str = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase : List[str] = int(shortest_edge / crop_pct )
lowerCamelCase : Optional[Any] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Optional[int] = resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__magic_name__ , size=(shortest_edge, shortest_edge) , data_format=__magic_name__ , **__magic_name__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__magic_name__ , size=(shortest_edge, shortest_edge) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Dict = size if size is not None else self.size
lowerCamelCase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : List[str] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : Optional[Any] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase : List[Any] = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase : Union[str, Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase : List[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase : Optional[int] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase : List[str] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
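# Worked sketch of the crop_pct rule in resize() above (values assumed for
# illustration): with shortest_edge=224 and the default crop_pct=224/256, the short
# side is first resized to int(224 / (224 / 256)) = 256 and then center-cropped back
# to 224 -- the familiar "resize to 256, crop to 224" evaluation transform.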
| 287 | 1 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def _a ( lowerCamelCase ):
return EnvironmentCommand()
class A__ ( __SCREAMING_SNAKE_CASE):
@staticmethod
def UpperCamelCase__ ( __magic_name__ ):
lowerCamelCase : Optional[int] = parser.add_parser("""env""" )
download_parser.set_defaults(func=__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = huggingface_hub.__version__
lowerCamelCase : int = """not installed"""
lowerCamelCase : Dict = """NA"""
if is_torch_available():
import torch
lowerCamelCase : int = torch.__version__
lowerCamelCase : List[str] = torch.cuda.is_available()
lowerCamelCase : List[str] = """not installed"""
if is_transformers_available():
import transformers
lowerCamelCase : Any = transformers.__version__
lowerCamelCase : int = """not installed"""
if is_accelerate_available():
import accelerate
lowerCamelCase : List[str] = accelerate.__version__
lowerCamelCase : Any = """not installed"""
if is_xformers_available():
import xformers
lowerCamelCase : Optional[int] = xformers.__version__
lowerCamelCase : List[Any] = {
"""`diffusers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
"""Huggingface_hub version""": hub_version,
"""Transformers version""": transformers_version,
"""Accelerate version""": accelerate_version,
"""xFormers version""": xformers_version,
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(__magic_name__ ) )
return info
@staticmethod
def UpperCamelCase__ ( __magic_name__ ):
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 287 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCamelCase ={
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class A__ ( unittest.TestCase):
@classmethod
def UpperCamelCase__ ( cls ):
lowerCamelCase : int = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
lowerCamelCase : Any = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__magic_name__ , repo_id="""test-config""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
lowerCamelCase : Optional[int] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__magic_name__ , repo_id="""valid_org/test-config-org""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : List[str] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
CustomConfig.register_for_auto_class()
lowerCamelCase : Optional[Any] = CustomConfig(attribute=4_2 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
lowerCamelCase : List[str] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 4_2 )
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase : Optional[int] = c.n_embd + 1 # int
lowerCamelCase : Optional[int] = c.resid_pdrop + 1.0 # float
lowerCamelCase : Tuple = not c.scale_attn_weights # bool
lowerCamelCase : Any = c.summary_type + """foo""" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__magic_name__ , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(__magic_name__ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(__magic_name__ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(__magic_name__ , c.summary_type , """mismatch for key: summary_type""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = PretrainedConfig()
lowerCamelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__magic_name__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
lowerCamelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(__magic_name__ , __magic_name__ )]
if len(__magic_name__ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
F''' {", ".join(__magic_name__ )}.''' )
def UpperCamelCase__ ( self ):
with self.assertRaises(__magic_name__ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
lowerCamelCase : str = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
# A mock response for an HTTP head request to emulate server down
lowerCamelCase : Dict = mock.Mock()
lowerCamelCase : Optional[int] = 5_0_0
lowerCamelCase : List[Any] = {}
lowerCamelCase : Tuple = HTTPError
lowerCamelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__magic_name__ ) as mock_head:
lowerCamelCase : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ):
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase : List[str] = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained("""bert-base-cased""" )
lowerCamelCase : Optional[Any] = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__magic_name__ )
lowerCamelCase : str = 2
json.dump(configuration.to_dict() , open(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase : Any = ["""config.42.0.0.json"""]
lowerCamelCase : Optional[Any] = 7_6_8
configuration.save_pretrained(__magic_name__ )
shutil.move(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , os.path.join(__magic_name__ , """config.42.0.0.json""" ) )
lowerCamelCase : int = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCamelCase__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowerCamelCase : str = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
lowerCamelCase : Tuple = """v4.0.0"""
lowerCamelCase , lowerCamelCase : Optional[int] = new_transformers.models.auto.AutoConfig.from_pretrained(
__magic_name__ , return_unused_kwargs=__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__magic_name__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase : Tuple = """v3.0.0"""
lowerCamelCase : Any = old_transformers.models.auto.AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 287 | 1 |
_lowerCamelCase =[0, 2, 4, 6, 8]
_lowerCamelCase =[1, 3, 5, 7, 9]
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1, -1, -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
lowerCamelCase : List[Any] = 0
for digit in range(10 ):
lowerCamelCase : Union[str, Any] = digit
result += reversible_numbers(
0, (remainder + 2 * digit) // 10, lowerCamelCase, lowerCamelCase )
return result
lowerCamelCase : Tuple = 0
for digita in range(10 ):
lowerCamelCase : Optional[int] = digita
if (remainder + digita) % 2 == 0:
lowerCamelCase : str = ODD_DIGITS
else:
lowerCamelCase : Optional[int] = EVEN_DIGITS
for digita in other_parity_digits:
lowerCamelCase : List[Any] = digita
result += reversible_numbers(
remaining_length - 2, (remainder + digita + digita) // 10, lowerCamelCase, lowerCamelCase, )
return result
def _a ( lowerCamelCase = 9 ):
lowerCamelCase : List[str] = 0
for length in range(1, max_power + 1 ):
result += reversible_numbers(lowerCamelCase, 0, [0] * length, lowerCamelCase )
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
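# Brute-force cross-check sketch (an addition for small ranges, mirroring the
# definition used above: n is reversible when n + reverse(n) has only odd digits,
# and numbers with a trailing zero are excluded).
def _is_reversible(n: int) -> bool:
    s = str(n)
    if s[-1] == "0":
        return False
    return all(int(d) % 2 == 1 for d in str(n + int(s[::-1])))

# sum(_is_reversible(n) for n in range(1, 1000)) == 120, the known count below 1000.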
| 287 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
lowerCamelCase : Any = tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # J'aime le camembert !
lowerCamelCase : str = model(__magic_name__ )["""last_hidden_state"""]
lowerCamelCase : Union[str, Any] = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice.
lowerCamelCase : Dict = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 287 | 1 |
import os
import sys
import unittest
_lowerCamelCase =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_lowerCamelCase =os.path.join(git_repo_path, """src""", """transformers""")
_lowerCamelCase ="""
{0} = None
"""
_lowerCamelCase ="""
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
_lowerCamelCase ="""
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(__magic_name__ )
lowerCamelCase : int = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(__magic_name__ , """tokenizers""" )
lowerCamelCase : Optional[int] = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(__magic_name__ , """tensorflow_text""" )
lowerCamelCase : List[Any] = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(__magic_name__ , """sentencepiece_and_tokenizers""" )
lowerCamelCase : List[Any] = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(__magic_name__ , """sentencepiece_and_tensorflow_text""" )
lowerCamelCase : Tuple = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(__magic_name__ , """sentencepiece_and_tokenizers_and_vision""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("""torch""" , __magic_name__ )
self.assertIn("""tensorflow_text""" , __magic_name__ )
self.assertIn("""sentencepiece_and_tokenizers""" , __magic_name__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertModel""" , objects["""tf"""] )
self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = create_dummy_object("""CONSTANT""" , """'torch'""" )
self.assertEqual(__magic_name__ , """\nCONSTANT = None\n""" )
lowerCamelCase : Union[str, Any] = create_dummy_object("""function""" , """'torch'""" )
self.assertEqual(
__magic_name__ , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
lowerCamelCase : Union[str, Any] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
"""
lowerCamelCase : List[str] = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
lowerCamelCase : str = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , __magic_name__ )
| 287 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_lowerCamelCase =get_logger(__name__)
class A__ :
def __init__( self , __magic_name__ = None ):
lowerCamelCase : Dict = (
os.path.join(__magic_name__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowerCamelCase : List[str] = Extractor
def UpperCamelCase__ ( self , __magic_name__ ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
lowerCamelCase : int = os.path.abspath(__magic_name__ )
return os.path.join(self.extract_dir , hash_url_to_filename(__magic_name__ ) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
return force_extract or (
not os.path.isfile(__magic_name__ ) and not (os.path.isdir(__magic_name__ ) and os.listdir(__magic_name__ ))
)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = False ):
lowerCamelCase : Union[str, Any] = self.extractor.infer_extractor_format(__magic_name__ )
if not extractor_format:
return input_path
lowerCamelCase : int = self._get_output_path(__magic_name__ )
if self._do_extract(__magic_name__ , __magic_name__ ):
self.extractor.extract(__magic_name__ , __magic_name__ , __magic_name__ )
return output_path
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
@abstractmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
...
@staticmethod
@abstractmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
...
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[bytes] = []
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with open(__magic_name__ , """rb""" ) as f:
return f.read(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if not magic_number:
lowerCamelCase : Optional[Any] = max(len(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
try:
lowerCamelCase : Tuple = cls.read_magic_number(__magic_name__ , __magic_name__ )
except OSError:
return False
return any(magic_number.startswith(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
return tarfile.is_tarfile(__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
def resolved(__magic_name__ ) -> str:
return os.path.realpath(os.path.abspath(__magic_name__ ) )
def badpath(__magic_name__ , __magic_name__ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__magic_name__ , __magic_name__ ) ).startswith(__magic_name__ )
def badlink(__magic_name__ , __magic_name__ ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCamelCase : List[str] = resolved(os.path.join(__magic_name__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__magic_name__ )
lowerCamelCase : Optional[Any] = resolved(__magic_name__ )
for finfo in members:
if badpath(finfo.name , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Dict = tarfile.open(__magic_name__ )
tar_file.extractall(__magic_name__ , members=TarExtractor.safemembers(__magic_name__ , __magic_name__ ) )
tar_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = [B"""\x1F\x8B"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with gzip.open(__magic_name__ , """rb""" ) as gzip_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if super().is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__magic_name__ , """rb""" ) as fp:
lowerCamelCase : List[str] = _EndRecData(__magic_name__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowerCamelCase : List[Any] = fp.read(__magic_name__ ) # CD is where we expect it to be
if len(__magic_name__ ) == sizeCentralDir:
lowerCamelCase : str = struct.unpack(__magic_name__ , __magic_name__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with zipfile.ZipFile(__magic_name__ , """r""" ) as zip_file:
zip_file.extractall(__magic_name__ )
zip_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[str] = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with lzma.open(__magic_name__ ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Union[str, Any] = rarfile.RarFile(__magic_name__ )
rf.extractall(__magic_name__ )
rf.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
lowerCamelCase : int = zstd.ZstdDecompressor()
with open(__magic_name__ , """rb""" ) as ifh, open(__magic_name__ , """wb""" ) as ofh:
dctx.copy_stream(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with bza.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with pyazr.SevenZipFile(__magic_name__ , """r""" ) as archive:
archive.extractall(__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ :
# Keep the zip extractor last, because other archive formats (e.g. tar or gzip) can be wrongly detected as zip
_UpperCAmelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase__ ( cls ):
return max(
len(__magic_name__ )
for extractor in cls.extractors.values()
if issubclass(__magic_name__ , __magic_name__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
try:
return MagicNumberBaseExtractor.read_magic_number(__magic_name__ , magic_number_length=__magic_name__ )
except OSError:
return b""
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = False ):
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = cls.infer_extractor_format(__magic_name__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ ): # <Added version="2.4.0"/>
lowerCamelCase : Dict = cls._get_magic_number_max_length()
lowerCamelCase : Optional[Any] = cls._read_magic_number(__magic_name__ , __magic_name__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return extractor_format
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = "deprecated" , ):
os.makedirs(os.path.dirname(__magic_name__ ) , exist_ok=__magic_name__ )
# Prevent parallel extractions
lowerCamelCase : Tuple = str(Path(__magic_name__ ).with_suffix(""".lock""" ) )
with FileLock(__magic_name__ ):
shutil.rmtree(__magic_name__ , ignore_errors=__magic_name__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__magic_name__ , __magic_name__ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = extractor if extractor != """deprecated""" else extractor_format
else:
lowerCamelCase : Optional[int] = cls.extractors[extractor_format]
return extractor.extract(__magic_name__ , __magic_name__ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=__magic_name__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__magic_name__ ):
return extractor.extract(__magic_name__ , __magic_name__ )
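# A minimal, hedged sketch of the magic-number dispatch the registry above
# implements. `MAGIC_NUMBERS` and `sniff_format` are illustrative names, not
# the library's API; the byte signatures come from the extractor classes above.
MAGIC_NUMBERS = {
    b"\x28\xb5\x2f\xfd": "zstd",
    b"\x42\x5a\x68": "bz2",
    b"\x37\x7a\xbc\xaf\x27\x1c": "7z",
    b"\x04\x22\x4d\x18": "lz4",
}
def sniff_format(path):
    # Read just enough bytes to compare against the longest signature.
    with open(path, "rb") as f:
        header = f.read(max(len(m) for m in MAGIC_NUMBERS))
    for magic, fmt in MAGIC_NUMBERS.items():
        if header.startswith(magic):
            return fmt
    return None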
| 287 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = """van"""
def __init__( self , __magic_name__=2_2_4 , __magic_name__=3 , __magic_name__=[7, 3, 3, 3] , __magic_name__=[4, 2, 2, 2] , __magic_name__=[6_4, 1_2_8, 3_2_0, 5_1_2] , __magic_name__=[3, 3, 1_2, 3] , __magic_name__=[8, 8, 4, 4] , __magic_name__="gelu" , __magic_name__=0.02 , __magic_name__=1e-6 , __magic_name__=1e-2 , __magic_name__=0.0 , __magic_name__=0.0 , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : List[Any] = image_size
lowerCamelCase : Any = num_channels
lowerCamelCase : Optional[int] = patch_sizes
lowerCamelCase : List[Any] = strides
lowerCamelCase : Tuple = hidden_sizes
lowerCamelCase : Optional[int] = depths
lowerCamelCase : List[Any] = mlp_ratios
lowerCamelCase : List[Any] = hidden_act
lowerCamelCase : Any = initializer_range
lowerCamelCase : str = layer_norm_eps
lowerCamelCase : List[str] = layer_scale_init_value
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : List[Any] = dropout_rate
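# Hedged usage sketch: assuming the class above corresponds to transformers'
# VanConfig (the checkpoint URL above suggests as much), a typical use is:
from transformers import VanConfig
config = VanConfig(image_size=224, num_channels=3)
print(config.hidden_sizes)  # [64, 128, 320, 512] with the defaults above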
| 287 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES =5_0_0_0_0_0
RESULTS_BASEPATH , RESULTS_FILENAME =os.path.split(__file__)
RESULTS_FILE_PATH =os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def map ( dataset, **kwargs ):
    _ = dataset.map(**kwargs )
@get_duration
def filter ( dataset, **kwargs ):
    _ = dataset.filter(**kwargs )
def benchmark_map_filter ( ):
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, """dataset.arrow""" ), features, num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples["""text"""] )
        times["""map identity"""] = map(dataset )
        times["""map identity batched"""] = map(dataset, batched=True )
        times["""map no-op batched"""] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type="""numpy""" ):
            times["""map no-op batched numpy"""] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type="""pandas""" ):
            times["""map no-op batched pandas"""] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type="""torch""", columns="""numbers""" ):
            times["""map no-op batched pytorch"""] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type="""tensorflow""", columns="""numbers""" ):
            times["""map no-op batched tensorflow"""] = map(dataset, function=lambda x : None, batched=True )
        times["""map fast-tokenizer batched"""] = map(dataset, function=tokenize, batched=True )
        times["""filter"""] = filter(dataset )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH, """wb""" ) as f:
            f.write(json.dumps(times ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
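# `get_duration` comes from a local `utils` module that is not shown here. A
# hypothetical re-implementation, consistent with how it is used above (each
# decorated call returns its wall-clock duration instead of its result):
import time
from functools import wraps
def get_duration(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start
    return wrapper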
| 287 | 1 |
import math
def malus_law ( initial_intensity, angle ):
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(lowerCamelCase ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
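# Quick worked check: an ideal polarizer at 45 degrees passes half the
# incident intensity, since cos^2(45 deg) = 0.5.
print(malus_law(100.0, 45.0))  # ~50.0
print(malus_law(100.0, 0.0))   # 100.0: polarizer aligned with the polarization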
| 287 |
def lucas_lehmer_test ( p ):
    if p < 2:
        raise ValueError("""p should not be less than 2!""" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
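# Usage sketch: for prime exponents p the test is exact, so it cleanly
# separates Mersenne primes from composites.
for p in [3, 5, 7, 11, 13, 17, 19, 31]:
    print(p, lucas_lehmer_test(p))  # only p = 11 prints False: 2**11 - 1 = 23 * 89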
| 287 | 1 |
from string import ascii_uppercase
dicta ={char: i for i, char in enumerate(ascii_uppercase)}
dictb =dict(enumerate(ascii_uppercase))
def generate_key ( message, key ):
    x = len(message )
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key ) == len(message ):
            break
        key += key[i]
        i += 1
    return key
def cipher_text ( message, key_new ):
    cipher = """"""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher += dictb[x]
    return cipher
def original_text ( cipher, key_new ):
    or_txt = """"""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt
def main ( ):
    message = """THE GERMAN ATTACK"""
    key = """SECRET"""
    key_new = generate_key(message, key )
    s = cipher_text(message, key_new )
    print(F'''Encrypted Text = {s}''' )
    print(F'''Original Text = {original_text(s, key_new )}''' )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
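# Round-trip sanity check of the variant above (it subtracts the key when
# encrypting and adds it back when decrypting, the reverse of the textbook
# Vigenere direction):
key = generate_key("""THE GERMAN ATTACK""", """SECRET""" )
assert original_text(cipher_text("""THE GERMAN ATTACK""", key ), key ) == """THE GERMAN ATTACK"""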
| 287 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , *__magic_name__ , **__magic_name__ ):
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 287 | 1 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """segformer"""
def __init__( self , __magic_name__=3 , __magic_name__=4 , __magic_name__=[2, 2, 2, 2] , __magic_name__=[8, 4, 2, 1] , __magic_name__=[3_2, 6_4, 1_6_0, 2_5_6] , __magic_name__=[7, 3, 3, 3] , __magic_name__=[4, 2, 2, 2] , __magic_name__=[1, 2, 5, 8] , __magic_name__=[4, 4, 4, 4] , __magic_name__="gelu" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=0.02 , __magic_name__=0.1 , __magic_name__=1e-6 , __magic_name__=2_5_6 , __magic_name__=2_5_5 , **__magic_name__ , ):
super().__init__(**__magic_name__ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __magic_name__ , )
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : str = num_encoder_blocks
lowerCamelCase : Any = depths
lowerCamelCase : List[Any] = sr_ratios
lowerCamelCase : int = hidden_sizes
lowerCamelCase : Union[str, Any] = patch_sizes
lowerCamelCase : Optional[Any] = strides
lowerCamelCase : Dict = mlp_ratios
lowerCamelCase : str = num_attention_heads
lowerCamelCase : Any = hidden_act
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase : Dict = classifier_dropout_prob
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : Any = decoder_hidden_size
lowerCamelCase : str = kwargs.get("""reshape_last_stage""" , __magic_name__ )
lowerCamelCase : Dict = semantic_loss_ignore_index
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = version.parse("""1.11""")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-4
@property
def UpperCamelCase__ ( self ):
return 1_2
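# Hedged usage sketch: assuming the configuration above is transformers'
# SegformerConfig (the checkpoint URL above suggests as much):
from transformers import SegformerConfig
config = SegformerConfig(num_labels=150)  # e.g. the 150 ADE20K classes
print(config.depths, config.hidden_sizes)  # [2, 2, 2, 2] [32, 64, 160, 256]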
| 287 | 1 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_lowerCamelCase ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_lowerCamelCase =direct_transformers_import(PATH_TO_TRANSFORMERS)
_lowerCamelCase =transformers.models.auto.configuration_auto.CONFIG_MAPPING
_lowerCamelCase ={
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used ( config_class, attributes, default_value, source_strings ):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                F'''config.{attribute}''' in modeling_source
                or F'''getattr(config, "{attribute}"''' in modeling_source
                or F'''getattr(self.config, "{attribute}"''' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    RF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''', modeling_source, )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        """bos_index""",
        """eos_index""",
        """pad_index""",
        """unk_index""",
        """mask_index""",
        """image_size""",
        """use_cache""",
        """out_features""",
        """out_indices""",
    ]
    attributes_used_in_generation = ["""encoder_no_repeat_ngram_size"""]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("""_token_id""" ):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [] )
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used ( config_class ):
    signature = dict(inspect.signature(config_class.__init__ ).parameters )
    parameter_names = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map ) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class )
    model_dir = os.path.dirname(config_source_file )
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn ) for fn in os.listdir(model_dir ) if fn.startswith("""modeling_""" )]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path ):
            with open(path ) as fp:
                modeling_sources.append(fp.read() )
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults ):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param] )
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources ):
            unused_attributes.append(attributes[0] )
    return sorted(unused_attributes )
def check_config_attributes ( ):
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ), lambda x : inspect.isclass(x )
                and issubclass(x, PretrainedConfig )
                and inspect.getmodule(x ) == inspect.getmodule(_config_class ), )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class )
            if len(unused_attributes ) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes ) > 0:
        error = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
        for name, attributes in configs_with_unused_attributes.items():
            error += F'''{name}: {attributes}\n'''
        raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
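# Self-contained illustration of the multi-line `getattr` pattern matched above
# (the attribute name here is just an example):
_demo_src = 'value = getattr(\n    self.config, "spatial_pos_max", 20)'
_demo_pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"spatial_pos_max"'
assert re.search(_demo_pattern, _demo_src) is not None  # the whitespace class spans the line break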
| 287 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = """gpt_neo"""
_UpperCAmelCase : Union[str, Any] = ["""past_key_values"""]
_UpperCAmelCase : List[Any] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , __magic_name__=5_0_2_5_7 , __magic_name__=2_0_4_8 , __magic_name__=2_0_4_8 , __magic_name__=2_4 , __magic_name__=[[["global", "local"], 1_2]] , __magic_name__=1_6 , __magic_name__=None , __magic_name__=2_5_6 , __magic_name__="gelu_new" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , **__magic_name__ , ):
lowerCamelCase : List[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : str = hidden_size
lowerCamelCase : Optional[int] = num_layers
lowerCamelCase : str = num_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : List[Any] = window_size
lowerCamelCase : int = activation_function
lowerCamelCase : Union[str, Any] = resid_dropout
lowerCamelCase : List[Any] = embed_dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Dict = classifier_dropout
lowerCamelCase : Any = layer_norm_epsilon
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Dict = use_cache
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : int = eos_token_id
lowerCamelCase : List[Any] = attention_types
lowerCamelCase : Optional[Any] = self.expand_attention_types_params(__magic_name__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ ):
lowerCamelCase : Optional[int] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def custom_unfold ( input, dimension, size, step ):
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step )
    min_length = torch.div(sizedim - size, step, rounding_mode="""floor""" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks ( seq_length, window_size ):
    import torch
    candidates = torch.arange(1, window_size )
    remainders = torch.remainder(seq_length, candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="""floor""" )
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
lowerCamelCase : str = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
lowerCamelCase : int = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase : Tuple = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase__ ( self ):
return self._config.num_heads
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ):
lowerCamelCase : Optional[int] = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
# We need to order the input in the way they appears in the forward()
lowerCamelCase : int = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
                batch , seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
lowerCamelCase : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase : str = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
lowerCamelCase : Tuple = common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase : str = ordered_inputs["""attention_mask"""].dtype
lowerCamelCase : Any = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase__ ( self ):
return 1_3
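# Standalone re-implementation (hedged) of `expand_attention_types_params`
# above, to make the per-layer expansion concrete:
def _expand(attention_types):
    layers = []
    for pattern, count in attention_types:
        for _ in range(count):
            layers.extend(pattern)
    return layers
_layers = _expand([[["global", "local"], 12]])
print(len(_layers), _layers[:4])  # 24 ['global', 'local', 'global', 'local']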
| 287 | 1 |
from math import factorial
DIGIT_FACTORIAL ={str(d): factorial(d) for d in range(1_0)}
def sum_of_digit_factorial ( n ):
    return sum(DIGIT_FACTORIAL[d] for d in str(n ) )
def solution ( ):
    # 7 * 9! + 1 is a safe upper bound: an 8-digit number is always larger than 8 * 9!.
    limit = 7 * factorial(9 ) + 1
    # Only 145 and 40585 satisfy the property, so this returns 40730.
    return sum(i for i in range(3, limit ) if sum_of_digit_factorial(i ) == i )
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 287 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 1 |
from __future__ import annotations
def ohms_law ( voltage, current, resistance ):
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
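# Usage check: pass exactly one zero and the solver returns the missing quantity.
print(ohms_law(voltage=10, current=2, resistance=0))  # {'resistance': 5.0}
print(ohms_law(voltage=0, current=2, resistance=5))   # {'voltage': 10.0}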
| 287 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters ( model_a, model_b, did_step, iteration ):
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model ( model, input, target, accelerator, do_backward=True ):
model.train()
lowerCamelCase : Dict = model(lowerCamelCase )
lowerCamelCase : Any = F.mse_loss(lowerCamelCase, target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCamelCase )
def get_training_setup ( accelerator, sched=False ):
set_seed(42 )
lowerCamelCase : Tuple = RegressionModel()
lowerCamelCase : Any = deepcopy(lowerCamelCase )
lowerCamelCase : Any = RegressionDataset(length=80 )
lowerCamelCase : Dict = DataLoader(lowerCamelCase, batch_size=16 )
model.to(accelerator.device )
if sched:
lowerCamelCase : int = AdamW(params=model.parameters(), lr=1e-3 )
lowerCamelCase : Optional[Any] = AdamW(params=ddp_model.parameters(), lr=1e-3 )
lowerCamelCase : str = LambdaLR(lowerCamelCase, lr_lambda=lambda lowerCamelCase : epoch**0.6_5 )
lowerCamelCase : Tuple = LambdaLR(lowerCamelCase, lr_lambda=lambda lowerCamelCase : epoch**0.6_5 )
# Make a copy of `model`
if sched:
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = accelerator.prepare(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
lowerCamelCase , lowerCamelCase : List[Any] = accelerator.prepare(lowerCamelCase, lowerCamelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def test_noop_sync ( accelerator ):
# Test when on a single CPU or GPU that the context manager does nothing
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_training_setup(lowerCamelCase )
# Use a single batch
lowerCamelCase , lowerCamelCase : Union[str, Any] = next(iter(lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
# Sync grads
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad, ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : List[Any] = ddp_input[torch.randperm(len(lowerCamelCase ) )]
def test_distributed_sync ( accelerator ):
# Test on distributed setup that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase : int = get_training_setup(lowerCamelCase )
# Use a single batch
lowerCamelCase , lowerCamelCase : Union[str, Any] = next(iter(lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Any = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
# Sync grads
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : Optional[Any] = ddp_input[torch.randperm(len(lowerCamelCase ) )]
def test_gradient_accumulation ( split_batches=False, dispatch_batches=False ):
lowerCamelCase : Any = Accelerator(
split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = get_training_setup(lowerCamelCase )
for iteration, batch in enumerate(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : str = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : Any = ddp_input[torch.randperm(len(lowerCamelCase ) )]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler ( split_batches=False, dispatch_batches=False ):
lowerCamelCase : List[Any] = Accelerator(
split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_training_setup(lowerCamelCase, lowerCamelCase )
for iteration, batch in enumerate(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowerCamelCase : Union[str, Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def test_dataloader_break ( ):
lowerCamelCase : int = Accelerator()
lowerCamelCase : Optional[Any] = RegressionDataset(length=80 )
lowerCamelCase : List[str] = DataLoader(lowerCamelCase, batch_size=16 )
lowerCamelCase : int = RegressionDataset(length=96 )
lowerCamelCase : Optional[int] = DataLoader(lowerCamelCase, batch_size=16 )
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.prepare(lowerCamelCase, lowerCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase )
if iteration < len(lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase )
if batch_num < len(lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main ( ):
lowerCamelCase : List[Any] = Accelerator()
lowerCamelCase : int = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(lowerCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(lowerCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation(lowerCamelCase, lowerCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""", """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, """`split_batches=False`, `dispatch_batches=False`**""", )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation_with_opt_and_scheduler(lowerCamelCase, lowerCamelCase )
def _mp_fn ( index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
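# Outside these tests, the `accumulate` pattern being verified reduces to the
# loop below: a minimal, hedged sketch (model, data and hyperparameters are
# illustrative, not taken from the tests above).
def _accumulate_sketch():
    import torch
    import torch.nn.functional as F
    from torch.utils.data import DataLoader, TensorDataset
    from accelerate import Accelerator
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model = torch.nn.Linear(1, 1)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    data = TensorDataset(torch.randn(8, 1), torch.randn(8, 1))
    loader = DataLoader(data, batch_size=2)
    model, opt, loader = accelerator.prepare(model, opt, loader)
    for x, y in loader:
        with accelerator.accumulate(model):  # gradients sync only every 2nd step
            loss = F.mse_loss(model(x), y)
            accelerator.backward(loss)
            opt.step()
            opt.zero_grad()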
| 287 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self , __magic_name__ , __magic_name__=1_3 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=9_9 , __magic_name__=2_4 , __magic_name__=2 , __magic_name__=6 , __magic_name__=3_7 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=1_6 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=None , __magic_name__=1_0_0_0 , ):
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Union[str, Any] = batch_size
lowerCamelCase : Tuple = seq_length
lowerCamelCase : int = is_training
lowerCamelCase : str = use_input_mask
lowerCamelCase : List[Any] = use_token_type_ids
lowerCamelCase : List[str] = use_labels
lowerCamelCase : Optional[Any] = vocab_size
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : Optional[Any] = num_hidden_layers
lowerCamelCase : Dict = num_attention_heads
lowerCamelCase : Any = intermediate_size
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Optional[Any] = hidden_dropout_prob
lowerCamelCase : int = attention_probs_dropout_prob
lowerCamelCase : Union[str, Any] = max_position_embeddings
lowerCamelCase : Dict = type_vocab_size
lowerCamelCase : Dict = type_sequence_label_size
lowerCamelCase : str = initializer_range
lowerCamelCase : Tuple = num_labels
lowerCamelCase : Union[str, Any] = scope
lowerCamelCase : Optional[Any] = range_bbox
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase : int = bbox[i, j, 3]
lowerCamelCase : Tuple = bbox[i, j, 1]
lowerCamelCase : Optional[int] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase : List[str] = bbox[i, j, 2]
lowerCamelCase : Tuple = bbox[i, j, 0]
lowerCamelCase : List[str] = t
lowerCamelCase : Any = None
if self.use_input_mask:
lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCamelCase : Tuple = None
if self.use_token_type_ids:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase : Tuple = None
lowerCamelCase : int = None
if self.use_labels:
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Optional[int] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase__ ( self ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : List[str] = LiltModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Union[str, Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
lowerCamelCase : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ )
lowerCamelCase : List[Any] = model(__magic_name__ , bbox=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : Union[str, Any] = self.num_labels
lowerCamelCase : Any = LiltForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Optional[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : List[str] = LiltForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Optional[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Optional[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : List[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Dict = False
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
return True
def UpperCamelCase__ ( self ):
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=3_7 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = LiltModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
@slow
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(__magic_name__ )
lowerCamelCase : Optional[Any] = torch.tensor([[1, 2]] , device=__magic_name__ )
lowerCamelCase : Optional[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ )
# forward pass
with torch.no_grad():
lowerCamelCase : int = model(input_ids=__magic_name__ , bbox=__magic_name__ )
lowerCamelCase : Union[str, Any] = torch.Size([1, 2, 7_6_8] )
lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__magic_name__ , )
self.assertTrue(outputs.last_hidden_state.shape , __magic_name__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1e-3 ) )
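# The same forward pass outside the test harness, as a hedged usage sketch
# (downloads the checkpoint referenced above):
def _lilt_usage_sketch():
    model = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" )
    input_ids = torch.tensor([[1, 2]] )
    bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] )  # one (x0, y0, x1, y1) box per token
    with torch.no_grad():
        outputs = model(input_ids=input_ids, bbox=bbox )
    print(outputs.last_hidden_state.shape )  # torch.Size([1, 2, 768])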
| 287 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Pearsonr( datasets.Metric):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 287 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_nllb"""] = ["""NllbTokenizer"""]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_nllb_fast"""] = ["""NllbTokenizerFast"""]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
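

# The `_import_structure` / `_LazyModule` pattern above defers importing heavy submodules
# until one of their attributes is first accessed. Below is a minimal self-contained
# sketch of the idea (an illustrative stand-in, not the actual transformers `_LazyModule`):
import importlib
import types


class LazyModule(types.ModuleType):
    """Module that imports a submodule only when one of its attributes is requested."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute name to the submodule that defines it.
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        # Only called when normal attribute lookup fails, i.e. on first access.
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value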
| 287 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
_lowerCamelCase ={
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class ConditionalDetrConfig( PretrainedConfig):
    model_type = """conditional_detr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=3_0_0 , encoder_layers=6 , encoder_ffn_dim=2_0_4_8 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_0_4_8 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_5_6 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=2 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , cls_loss_coefficient=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , focal_alpha=0.25 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        return self.encoder_attention_heads

    @property
    def hidden_size( self ):
        return self.d_model

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig( OnnxConfig):
    torch_onnx_minimum_version = version.parse("""1.11""")
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ] )

    @property
    def atol_for_validation( self ):
        return 1e-5

    @property
    def default_onnx_opset( self ):
        return 1_2
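

# A quick usage sketch for the config above (editorial addition): constructing it with
# defaults and reading attributes routed through `attribute_map` / the properties above:
#
#     config = ConditionalDetrConfig()
#     assert config.hidden_size == config.d_model == 256
#     assert config.num_attention_heads == config.encoder_attention_heads == 8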
| 287 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig( PretrainedConfig):
    model_type = """roberta"""
    def __init__( self , vocab_size=5_0_2_6_5 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig( OnnxConfig):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
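

# For reference, the `inputs` property above resolves to the following dynamic axes for
# the default (sequence-level) task; the multiple-choice branch adds a `choice` axis:
#
#     OrderedDict([
#         ("input_ids", {0: "batch", 1: "sequence"}),
#         ("attention_mask", {0: "batch", 1: "sequence"}),
#     ])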
| 287 |
import json
import sys
def format_json_to_md( input_json_file , output_md_file ):
    with open(input_json_file , encoding="""utf-8""" ) as f:
        results = json.load(f )

    output_md = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]

    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("""/""" )[-1]
        output_md.append(F'''### Benchmark: {benchmark_file_name}''' )

        title = """| metric |"""
        lines = """|--------|"""
        value = """| new / old (diff) |"""
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["""new"""]
            old_val = metric_vals.get("""old""" , None )
            dif_val = metric_vals.get("""diff""" , None )

            val_str = F''' {new_val:f}''' if isinstance(new_val , (int, float) ) else """None"""
            if old_val is not None:
                val_str += F''' / {old_val:f}''' if isinstance(old_val , (int, float) ) else "None"
            if dif_val is not None:
                val_str += F''' ({dif_val:f})''' if isinstance(dif_val , (int, float) ) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("""</details>""" )

    with open(output_md_file , """w""" , encoding="""utf-8""" ) as f:
        f.writelines("""\n""".join(output_md ) )


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
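

# Illustrative input/output for `format_json_to_md` (the file contents below are made up
# for demonstration; the JSON layout is inferred from the code above):
#
#   input JSON:
#     {"benchmarks/inference.json": {"latency_ms": {"new": 12.5, "old": 13.1, "diff": -0.6}}}
#
#   emitted markdown includes:
#     ### Benchmark: inference.json
#     | metric | latency_ms |
#     |--------|---|
#     | new / old (diff) |  12.500000 / 13.100000 (-0.600000) |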
| 287 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_autoformer""": [
        """AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """AutoformerConfig""",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_autoformer"""] = [
        """AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """AutoformerForPrediction""",
        """AutoformerModel""",
        """AutoformerPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 287 |
def reverse_long_words( sentence ):
    """Reverse each word of the sentence that is longer than four characters.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        """""".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("""Hey wollef sroirraw"""))
| 287 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
_lowerCamelCase =True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)


def list_field( default=None, metadata=None ):
    return field(default_factory=lambda: default, metadata=metadata )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""})
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""})
    attention_dropout: Optional[float] = field(
        default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""})
    activation_dropout: Optional[float] = field(
        default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""})
    hidden_dropout: Optional[float] = field(
        default=0.1 , metadata={
            """help""": """The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."""
        } , )
    feat_proj_dropout: Optional[float] = field(
        default=0.1 , metadata={"""help""": """The dropout probability for all 1D convolutional layers in the feature extractor."""} , )
    mask_time_prob: Optional[float] = field(
        default=0.05 , metadata={
            """help""": (
                """Probability of each feature vector along the time axis to be chosen as the start of the vector"""
                """span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"""
                """vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."""
            )
        } , )
    layerdrop: Optional[float] = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""})
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""})
    train_split_name: Optional[str] = field(
        default="""train+validation""" , metadata={
            """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""})
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_val_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of validation examples to this """
                """value if set."""
            )
        } , )
    chars_to_ignore: List[str] = list_field(
        default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , )
@dataclass
class DataCollatorCTCWithPadding:
    processor: WavaVecaProcessor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__( self , features ):
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"""input_values""": feature["""input_values"""]} for feature in features]
        label_features = [{"""input_ids""": feature["""labels"""]} for feature in features]

        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="""pt""" , )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["""input_ids"""].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_0_0 )

        batch["""labels"""] = labels

        return batch
class CTCTrainer( Trainer):
    def training_step( self , model , inputs ):
        model.train()
        inputs = self._prepare_inputs(inputs )

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["""labels"""] >= 0).sum()
            else:
                raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()

        return loss.detach()
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCamelCase : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""", lowerCamelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
lowerCamelCase : Dict = datasets.load_dataset(
"""common_voice""", data_args.dataset_config_name, split=data_args.train_split_name )
lowerCamelCase : Optional[Any] = datasets.load_dataset("""common_voice""", data_args.dataset_config_name, split="""test""" )
# Create and save tokenizer
lowerCamelCase : Optional[Any] = F'''[{"".join(data_args.chars_to_ignore )}]'''
def remove_special_characters(lowerCamelCase ):
lowerCamelCase : Union[str, Any] = re.sub(lowerCamelCase, """""", batch["""sentence"""] ).lower() + """ """
return batch
lowerCamelCase : int = train_dataset.map(lowerCamelCase, remove_columns=["""sentence"""] )
lowerCamelCase : Any = eval_dataset.map(lowerCamelCase, remove_columns=["""sentence"""] )
def extract_all_chars(lowerCamelCase ):
lowerCamelCase : int = """ """.join(batch["""text"""] )
lowerCamelCase : int = list(set(lowerCamelCase ) )
return {"vocab": [vocab], "all_text": [all_text]}
lowerCamelCase : Optional[int] = train_dataset.map(
lowerCamelCase, batched=lowerCamelCase, batch_size=-1, keep_in_memory=lowerCamelCase, remove_columns=train_dataset.column_names, )
lowerCamelCase : Tuple = train_dataset.map(
lowerCamelCase, batched=lowerCamelCase, batch_size=-1, keep_in_memory=lowerCamelCase, remove_columns=eval_dataset.column_names, )
lowerCamelCase : Union[str, Any] = list(set(vocab_train["""vocab"""][0] ) | set(vocab_test["""vocab"""][0] ) )
lowerCamelCase : Tuple = {v: k for k, v in enumerate(lowerCamelCase )}
lowerCamelCase : Optional[Any] = vocab_dict[""" """]
del vocab_dict[" "]
lowerCamelCase : str = len(lowerCamelCase )
lowerCamelCase : str = len(lowerCamelCase )
with open("""vocab.json""", """w""" ) as vocab_file:
json.dump(lowerCamelCase, lowerCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase : Dict = WavaVecaCTCTokenizer(
"""vocab.json""", unk_token="""[UNK]""", pad_token="""[PAD]""", word_delimiter_token="""|""", )
lowerCamelCase : int = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=1_6000, padding_value=0.0, do_normalize=lowerCamelCase, return_attention_mask=lowerCamelCase )
lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=lowerCamelCase, tokenizer=lowerCamelCase )
lowerCamelCase : Dict = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction="""mean""", pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer ), )
if data_args.max_train_samples is not None:
lowerCamelCase : Optional[int] = min(len(lowerCamelCase ), data_args.max_train_samples )
lowerCamelCase : List[str] = train_dataset.select(range(lowerCamelCase ) )
if data_args.max_val_samples is not None:
lowerCamelCase : str = eval_dataset.select(range(data_args.max_val_samples ) )
lowerCamelCase : Dict = torchaudio.transforms.Resample(4_8000, 1_6000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Optional[Any] = torchaudio.load(batch["""path"""] )
lowerCamelCase : Optional[Any] = resampler(lowerCamelCase ).squeeze().numpy()
lowerCamelCase : List[str] = 1_6000
lowerCamelCase : Any = batch["""text"""]
return batch
lowerCamelCase : int = train_dataset.map(
lowerCamelCase, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
lowerCamelCase : Optional[Any] = eval_dataset.map(
lowerCamelCase, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
def prepare_dataset(lowerCamelCase ):
# check that all files have the correct sampling rate
assert (
len(set(batch["""sampling_rate"""] ) ) == 1
), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
lowerCamelCase : Any = processor(
audio=batch["""speech"""], text=batch["""target_text"""], sampling_rate=batch["""sampling_rate"""][0] )
batch.update(lowerCamelCase )
return batch
lowerCamelCase : Any = train_dataset.map(
lowerCamelCase, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=lowerCamelCase, num_proc=data_args.preprocessing_num_workers, )
lowerCamelCase : Union[str, Any] = eval_dataset.map(
lowerCamelCase, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=lowerCamelCase, num_proc=data_args.preprocessing_num_workers, )
# Metric
lowerCamelCase : List[str] = datasets.load_metric("""wer""" )
def compute_metrics(lowerCamelCase ):
lowerCamelCase : str = pred.predictions
lowerCamelCase : Optional[Any] = np.argmax(lowerCamelCase, axis=-1 )
lowerCamelCase : Union[str, Any] = processor.tokenizer.pad_token_id
lowerCamelCase : Optional[int] = processor.batch_decode(lowerCamelCase )
# we do not want to group tokens when computing the metrics
lowerCamelCase : Any = processor.batch_decode(pred.label_ids, group_tokens=lowerCamelCase )
lowerCamelCase : Union[str, Any] = wer_metric.compute(predictions=lowerCamelCase, references=lowerCamelCase )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
lowerCamelCase : Dict = DataCollatorCTCWithPadding(processor=lowerCamelCase, padding=lowerCamelCase )
# Initialize our Trainer
lowerCamelCase : Tuple = CTCTrainer(
model=lowerCamelCase, data_collator=lowerCamelCase, args=lowerCamelCase, compute_metrics=lowerCamelCase, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCamelCase : Optional[Any] = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
lowerCamelCase : str = model_args.model_name_or_path
else:
lowerCamelCase : int = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
lowerCamelCase : Tuple = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model()
lowerCamelCase : Tuple = train_result.metrics
lowerCamelCase : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
lowerCamelCase : Tuple = min(lowerCamelCase, len(lowerCamelCase ) )
trainer.log_metrics("""train""", lowerCamelCase )
trainer.save_metrics("""train""", lowerCamelCase )
trainer.save_state()
# Evaluation
lowerCamelCase : Tuple = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCamelCase : List[Any] = trainer.evaluate()
lowerCamelCase : Dict = data_args.max_val_samples if data_args.max_val_samples is not None else len(lowerCamelCase )
lowerCamelCase : List[str] = min(lowerCamelCase, len(lowerCamelCase ) )
trainer.log_metrics("""eval""", lowerCamelCase )
trainer.save_metrics("""eval""", lowerCamelCase )
return results
if __name__ == "__main__":
main()
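

# A hypothetical launch command for the fine-tuning script above (the script filename,
# model identifier, and paths are illustrative placeholders, not taken from the original;
# the flags correspond to the dataclass fields and TrainingArguments used above):
#
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-xlsr-finetuned \
#       --num_train_epochs 5 \
#       --per_device_train_batch_size 16 \
#       --do_train --do_eval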
| 287 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version( min_version ):
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                """This example requires a source install from HuggingFace Transformers (see """
                """`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
            )
        else:
            error_message = F'''This example requires a minimum version of {min_version},'''
        error_message += F''' but the version found is {__version__}.\n'''
        raise ImportError(
            error_message
            + """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
            """versions of HuggingFace Transformers.""" )
| 287 | 1 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins( root: TreeNode | None ) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(root ) != count_coins(root ):
        raise ValueError('''The nodes number should be same as the number of coins''' )

    # Main calculation
    def get_distrib(node: TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )

        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves , result_excess )

    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
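

# A small usage demo for `distribute_coins` above (editorial addition; the tree values
# are chosen for illustration):
if __name__ == "__main__":
    demo_root = TreeNode(0, TreeNode(3), TreeNode(0))  # 3 coins spread over 3 nodes
    print(distribute_coins(demo_root))  # 3 moves so every node ends with exactly one coin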
| 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig( PretrainedConfig):
    model_type = """camembert"""
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig( OnnxConfig):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 287 | 0 |