code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
# NOTE(review): machine-obfuscated copy of Hugging Face's `TrOCRConfig`.
# Defects visible here, left byte-identical because the original identifier
# names are not recoverable from this file alone:
#   * the base class `__lowercase` is never defined in this file;
#   * all three class attributes share one name, so only the last assignment
#     survives (originally `model_type`, `keys_to_ignore_at_inference`,
#     `attribute_map` — presumably; TODO confirm against upstream);
#   * `__init__` repeats the parameter name `__magic_name__` — a SyntaxError;
#   * the body reads names (`vocab_size`, `d_model`, ...) that are never bound.
class _A ( __lowercase ):
    # All three bound to the same name — see note above.
    lowercase__: Tuple = '''trocr'''
    lowercase__: Tuple = ['''past_key_values''']
    lowercase__: Tuple = {
        '''num_attention_heads''': '''decoder_attention_heads''',
        '''hidden_size''': '''d_model''',
        '''num_hidden_layers''': '''decoder_layers''',
    }

    def __init__( self : Dict , __magic_name__ : List[Any]=5_02_65 , __magic_name__ : Tuple=10_24 , __magic_name__ : List[Any]=12 , __magic_name__ : int=16 , __magic_name__ : Optional[Any]=40_96 , __magic_name__ : str="gelu" , __magic_name__ : Optional[int]=5_12 , __magic_name__ : int=0.1 , __magic_name__ : List[Any]=0.0 , __magic_name__ : str=0.0 , __magic_name__ : Dict=2 , __magic_name__ : Dict=0.02 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=False , __magic_name__ : Optional[Any]=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[str]=1 , __magic_name__ : str=0 , __magic_name__ : Dict=2 , **__magic_name__ : List[str] , ) -> Any:
        """Store decoder hyper-parameters and forward special-token ids to the base config.

        NOTE(review): every right-hand-side name below is unbound in this file.
        """
        __snake_case : Optional[Any] = vocab_size
        __snake_case : Union[str, Any] = d_model
        __snake_case : List[Any] = decoder_layers
        __snake_case : Optional[Any] = decoder_attention_heads
        __snake_case : Optional[int] = decoder_ffn_dim
        __snake_case : Optional[int] = activation_function
        __snake_case : Dict = max_position_embeddings
        __snake_case : Optional[int] = dropout
        __snake_case : str = attention_dropout
        __snake_case : List[str] = activation_dropout
        __snake_case : str = init_std
        __snake_case : List[str] = decoder_layerdrop
        __snake_case : Any = use_cache
        __snake_case : int = scale_embedding
        __snake_case : List[Any] = use_learned_position_embeddings
        __snake_case : int = layernorm_embedding
        super().__init__(
            pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , decoder_start_token_id=__magic_name__ , **__magic_name__ , )
| 26
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
UpperCAmelCase_ : List[Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def UpperCamelCase ( _A : Optional[Any]=None )-> Union[str, Any]:
    """Build the argparse parser for the `accelerate tpu-config` command.

    NOTE(review): obfuscated copy of accelerate's `tpu_command_parser`. The
    assignment targets (`A__`) do not match the names read afterwards
    (`parser`, `config_args`, `pod_args`), and `subparsers`/`_description`
    are unbound in this file, so this cannot run as-is. Code left unchanged.
    """
    if subparsers is not None:
        A__ = subparsers.add_parser("tpu-config" , description=_description )
    else:
        A__ = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
    # Core arguments
    A__ = parser.add_argument_group(
        "Config Arguments" , "Arguments that can be configured through `accelerate config`." )
    config_args.add_argument(
        "--config_file" , type=_A , default=_A , help="Path to the config file to use for accelerate." , )
    config_args.add_argument(
        "--tpu_name" , default=_A , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
    config_args.add_argument(
        "--tpu_zone" , default=_A , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
    A__ = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
    pod_args.add_argument(
        "--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
    pod_args.add_argument(
        "--command_file" , default=_A , help="The path to the file containing the commands to run on the pod on startup." , )
    pod_args.add_argument(
        "--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
    pod_args.add_argument(
        "--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
    pod_args.add_argument(
        "--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
    pod_args.add_argument(
        "--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
    if subparsers is not None:
        # When used as a sub-command, wire the launcher in as the handler.
        parser.set_defaults(func=_A )
    return parser
def UpperCamelCase ( _A : Optional[Any] )-> Optional[int]:
    """Assemble the startup command list and ssh it to every TPU-VM worker.

    NOTE(review): obfuscated copy of accelerate's `tpu_command_launcher`.
    Assignment targets (`A__`) do not match the names read later (`defaults`,
    `new_cmd`, `cmd`), so this raises NameError as written. Left unchanged.
    """
    A__ = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(_A ):
        A__ = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            A__ = defaults.command_file
        if not args.command and defaults.commands is not None:
            A__ = defaults.commands
        if not args.tpu_name:
            A__ = defaults.tpu_name
        if not args.tpu_zone:
            A__ = defaults.tpu_zone
    # Resolve which accelerate version string to pip-install on the pod.
    if args.accelerate_version == "dev":
        A__ = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        A__ = "accelerate -U"
    elif isinstance(parse(args.accelerate_version ) , _A ):
        A__ = f"""accelerate=={args.accelerate_version}"""
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod." )
    if args.command_file:
        with open(args.command_file , "r" ) as f:
            A__ = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , _A ):
        A__ = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    A__ = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    A__ = "; ".join(_A )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    A__ = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        # Dry-run: show the command without executing it.
        print(f"""Running {" ".join(_A )}""" )
        return
    subprocess.run(_A )
    print("Successfully setup pod." )
def UpperCamelCase ( )-> Optional[int]:
    """Standalone entry point: parse CLI args and launch the tpu-config command.

    NOTE(review): `tpu_command_parser` / `tpu_command_launcher` / `parser` are
    not defined in this file (the defs above were renamed by obfuscation), so
    calling this raises NameError. Left unchanged.
    """
    A__ = tpu_command_parser()
    A__ = parser.parse_args()
    tpu_command_launcher(_A )
| 491
| 0
|
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Project Euler 174: count tile totals t <= t_limit that can form a
    square lamina (hollow square) in n distinct ways for some 1 <= n <= n_limit.

    Fixes vs. the original: the signature repeated one parameter name (a
    SyntaxError), the body read unbound names (`t_limit`, `count`,
    `hole_width_lower_bound`), `defaultdict` was built from an unbound
    factory, the n-limit was hard-coded, and the `__main__` guard called an
    undefined `solution`.

    :param t_limit: largest tile count to consider (inclusive).
    :param n_limit: count a t only if its number of laminae is in [1, n_limit].
    :return: number of qualifying tile totals.
    """
    count = defaultdict(int)
    # A lamina uses t = outer**2 - hole**2 tiles; the thinnest one
    # (hole = outer - 2) uses 4*outer - 4, so outer <= t_limit // 4 + 1.
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # Smallest hole keeping the tile count within t_limit.
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # Outer and hole widths must share parity for a centred hole.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 161
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy import structure for the Transfo-XL model family.  Fixes vs. the
# original: the module map and the backend symbol lists were all bound to one
# throwaway name (each list clobbered the map), `_LazyModule` was called with
# an undefined `_import_structure`, and the lazy module was never installed
# into `sys.modules`.
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch symbols are only registered when torch is installed.
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow symbols are only registered when TF is installed.
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; attribute access triggers the
    # real imports declared in _import_structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 161
| 1
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> str:
    """Pad or clip a batch of sequences to a fixed length (LUKE NER example helper).

    NOTE(review): obfuscated beyond repair here — all four parameters share
    one name (a SyntaxError), `sequence_length` / `padding_side` /
    `out_tensor` are never bound, and the four branches are identical where
    the original presumably wrote into different slices of `out_tensor`.
    Left byte-identical; restoring it needs the upstream source.
    """
    if isinstance(lowercase__ ,lowercase__ ):
        # Tuple padding value -> 2-wide last axis (e.g. entity spans).
        a_ = np.full((len(lowercase__ ), sequence_length, 2) ,lowercase__ )
    else:
        a_ = np.full((len(lowercase__ ), sequence_length) ,lowercase__ )
    for i, tensor in enumerate(lowercase__ ):
        if padding_side == "right":
            if isinstance(lowercase__ ,lowercase__ ):
                a_ = tensor[:sequence_length]
            else:
                a_ = tensor[:sequence_length]
        else:
            if isinstance(lowercase__ ,lowercase__ ):
                a_ = tensor[:sequence_length]
            else:
                a_ = tensor[:sequence_length]
    return out_tensor.tolist()
def __UpperCAmelCase (lowercase__ ) -> bool:
    """Return True if the single character *lowercase__* is punctuation.

    Fixes vs. the original: the return annotation said ``List[str]`` although
    the function returns a bool. NOTE(review): this definition also shadows
    the earlier function of the same obfuscated name in this file.

    ASCII characters in the ranges !-/, :-@, [-` and {-~ are treated as
    punctuation even though some of them (e.g. "$", "^") are not in the
    Unicode "P*" categories; everything else falls back to
    `unicodedata.category`.
    """
    cp = ord(lowercase__ )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    return unicodedata.category(lowercase__ ).startswith("P" )
# NOTE(review): obfuscated copy of the LUKE token-classification data
# collator. The "fields" below lack annotations, so @dataclass records no
# fields, and all six share one name — only the last assignment survives
# (originally presumably tokenizer / padding / max_length /
# pad_to_multiple_of / label_pad_token_id / return_tensors; TODO confirm).
# The base class `lowercase_` is undefined here. Left byte-identical.
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
    _UpperCAmelCase =42
    _UpperCAmelCase =True
    _UpperCAmelCase =None
    _UpperCAmelCase =None
    _UpperCAmelCase =-100
    _UpperCAmelCase ="pt"

    def _lowerCAmelCase ( self: Tuple , a: Optional[int]) ->Optional[int]:
        """Collate a batch of features, padding labels/ner_tags/entity spans.

        NOTE(review): reads names (`features`, `label_name`, `labels`,
        `batch`, `sequence_length`, `padding_side`) that the obfuscated
        assignment targets (`a_`) never bind, and `torch.intaa` looks like a
        mangled `torch.int64` — TODO confirm against upstream.
        """
        import torch

        a_ = "label" if "label" in features[0].keys() else "labels"
        a_ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        a_ = self.tokenizer.pad(
            a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
        if labels is None:
            return batch
        a_ = torch.tensor(batch["entity_ids"]).shape[1]
        a_ = self.tokenizer.padding_side
        # Pad every label sequence out to the tokenized sequence length.
        if padding_side == "right":
            a_ = [
                list(a) + [self.label_pad_token_id] * (sequence_length - len(a)) for label in labels
            ]
        else:
            a_ = [
                [self.label_pad_token_id] * (sequence_length - len(a)) + list(a) for label in labels
            ]
        a_ = [feature["ner_tags"] for feature in features]
        a_ = padding_tensor(a , -1 , a , a)
        a_ = [feature["original_entity_spans"] for feature in features]
        a_ = padding_tensor(a , (-1, -1) , a , a)
        a_ = {k: torch.tensor(a , dtype=torch.intaa) for k, v in batch.items()}
        return batch
| 685
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure for CPM-Ant.  Fixes vs. the original: the module map
# and the torch symbol list were bound to one throwaway name (the list
# clobbered the map), `_LazyModule` was called with an undefined
# `_import_structure`, and the lazy module was never installed into
# `sys.modules`.
_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only registered when torch is installed.
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that resolves attributes from
    # _import_structure on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685
| 1
|
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """Project Euler 135: count n < `limit` + 1 for which
    x**2 - y**2 - z**2 = n has exactly ten solutions with x, y, z positive
    integers in (decreasing) arithmetic progression.

    With x = y + d and z = y - d the equation reduces to n = y * (4d - y), so
    every solution corresponds to a divisor pair n = first_term * k with
    common_difference d = (first_term + k) / 4 an integer and z > 0.

    Fixes vs. the original: the parameter was obfuscated while the body read
    an unbound `limit`, the inner `range` reused the parameter name three
    times instead of (first_term, limit, first_term), locals were unbound,
    and the `__main__` guard called an undefined `solution`.
    """
    bound = limit + 1  # original counted n from 1 through `limit` inclusive
    frequency = [0] * bound
    for first_term in range(1, bound):
        # n runs over the multiples of first_term, so n // first_term is exact.
        for n in range(first_term, bound, first_term):
            common_difference = first_term + n // first_term
            if common_difference % 4 == 0:  # d must be an integer: 4d = y + k
                common_difference //= 4
                # z > 0 requires first_term > d; n > 0 requires first_term < 4d.
                if common_difference < first_term < 4 * common_difference:
                    frequency[n] += 1
    return sum(1 for x in frequency[1:bound] if x == 10)


if __name__ == "__main__":
    print(f'{solution() = }')
| 715
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for PoolFormer.  Fixes vs. the original: the module
# map and the backend symbol lists were bound to throwaway names (each list
# clobbered the previous value), `_LazyModule` was called with an undefined
# `_import_structure`, and the lazy module was never installed into
# `sys.modules`.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image-processing helpers need the vision extras.
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only registered when torch is installed.
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that resolves attributes from
    # _import_structure on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 309
| 0
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 583
|
import pprint

import requests

# Base URL for the free ZenQuotes REST API.
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day():
    """Return the quote of the day as parsed JSON.

    Fixed: the endpoint constant was bound to an obfuscated name while the
    functions read the undefined `API_ENDPOINT_URL`, and both functions
    shared one name so the first was shadowed and unreachable.
    """
    return requests.get(API_ENDPOINT_URL + '''/today''' ).json()


def random_quotes():
    """Return a random quote as parsed JSON (the name `__main__` calls)."""
    return requests.get(API_ENDPOINT_URL + '''/random''' ).json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 583
| 1
|
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def UpperCamelCase( *objects ):
    """Drop references to *objects*, run GC, and empty the accelerator cache.

    Returns the list of released slots (all None) so callers can rebind,
    e.g. ``a, b = release(a, b)``.

    Fixed: the body read an undefined `__a` everywhere and never actually
    cleared or returned the arguments (mirrors accelerate's
    `release_memory`).
    """
    objects = list(objects )  # *args arrives as a tuple; we need item assignment
    for i in range(len(objects ) ):
        objects[i] = None
    gc.collect()
    # Empty whichever accelerator cache is present (XPU > NPU > CUDA).
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def UpperCamelCase( exception ):
    """Return True if `exception` is a single-message RuntimeError whose text
    matches a known out-of-memory / allocator failure.

    Fixed: the body read undefined names (`__a`, `exception`, `_statements`)
    because the obfuscation scrambled the parameter and local bindings.
    """
    _statements = [
        'CUDA out of memory.',  # CUDA OOM
        'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.',  # CUDNN SNAFU
        'DefaultCPUAllocator: can\'t allocate memory',  # CPU OOM
    ]
    # Torch raises these as RuntimeError with a single message argument.
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False
def UpperCamelCase( UpperCAmelCase_ = None , UpperCAmelCase_ = 1_28 ):
    """Decorator that retries a function with a halved batch size on OOM.

    NOTE(review): obfuscated copy of accelerate's
    `find_executable_batch_size`. As written it cannot run: the signature
    repeats one parameter name (a SyntaxError, as does the inner
    `decorator(*UpperCAmelCase_, **UpperCAmelCase_)`), and the body reads
    unbound names (`function`, `starting_batch_size`, `batch_size`, `__a`,
    `params`, `args`, `arg_str`). Left byte-identical.
    """
    if function is None:
        # Bare `@decorator()` usage: return a partial awaiting the function.
        return functools.partial(__a , starting_batch_size=__a )
    UpperCAmelCase : Dict = starting_batch_size

    def decorator(*UpperCAmelCase_ , **UpperCAmelCase_ ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        UpperCAmelCase : Union[str, Any] = list(inspect.signature(__a ).parameters.keys() )
        # Guard against user error
        if len(__a ) < (len(__a ) + 1):
            UpperCAmelCase : Optional[int] = ', '.join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
            raise TypeError(
                F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
                F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
        while True:
            if batch_size == 0:
                raise RuntimeError('No executable batch size found, reached zero.' )
            try:
                return function(__a , *__a , **__a )
            except Exception as e:
                # Halve the batch size only for recognised OOM errors.
                if should_reduce_batch_size(__a ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
| 720
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast pipeline test for the KarrasVe diffusers pipeline.

    NOTE(review): obfuscated — both methods share the name `UpperCAmelCase_`
    (the property is shadowed and `self.dummy_uncond_unet` is never defined),
    `UNetaDModel` is presumably a mangled `UNet2DModel`, and the `lowercase_`
    references and `model`/`pipe`/`image` locals are unbound. Left unchanged.
    """

    @property
    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        # Deterministic tiny UNet for the fast test.
        torch.manual_seed(0 )
        UpperCAmelCase : Any = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
        UpperCAmelCase : Dict = self.dummy_uncond_unet
        UpperCAmelCase : Dict = KarrasVeScheduler()
        UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
        UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
        UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
        UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
        UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        # Expected corner pixels for the seeded 2-step run.
        UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow end-to-end KarrasVe test against a pretrained 256x256 checkpoint.

    NOTE(review): obfuscated — this class shadows the fast-test class of the
    same name above, and the `lowercase_` references plus the locals read
    later (`pipe`, `image`, `image_slice`, `expected_slice`) are unbound.
    Left unchanged.
    """

    def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
        UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
        UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
        UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
        UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
        UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
        UpperCAmelCase : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        # Reference corner pixels for the seeded 20-step run.
        UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 695
| 0
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCamelCase_ ( unittest.TestCase ):
    """CPU smoke tests: launch the accelerate test scripts via debug_launcher.

    Fixed: both methods shared the name `lowerCAmelCase_`, so the first was
    shadowed and never ran — and neither name carried the `test_` prefix
    unittest discovery requires, so NO test ran at all. Renamed with distinct
    `test_`-prefixed names.
    """

    def test_debug_launcher_script( self : Optional[Any] ):
        debug_launcher(test_script.main )

    def test_debug_launcher_ops( self : Dict ):
        debug_launcher(test_ops.main )
| 17
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _lowerCamelCase ( a_ ):
    """Reader that builds a `datasets` Dataset from a Spark DataFrame.

    NOTE(review): obfuscated copy of `datasets`' SparkDatasetReader. The
    `__init__` signature repeats the parameter name `UpperCamelCase` (a
    SyntaxError) and the body reads `load_from_cache_file` / `file_format`,
    which are never bound; the base class `a_` is whatever was last assigned
    that name at module level. Left byte-identical.
    """

    def __init__( self : Optional[Any] , UpperCamelCase : pyspark.sql.DataFrame , UpperCamelCase : Optional[NamedSplit] = None , UpperCamelCase : Optional[Features] = None , UpperCamelCase : bool = True , UpperCamelCase : str = None , UpperCamelCase : bool = False , UpperCamelCase : str = None , UpperCamelCase : bool = True , UpperCamelCase : str = "arrow" , **UpperCamelCase : Optional[int] , ) -> List[str]:
        """Store cache/format options and build the Spark dataset builder."""
        super().__init__(
            split=UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase , streaming=UpperCamelCase , **UpperCamelCase , )
        lowerCAmelCase__ : Union[str, Any] = load_from_cache_file
        lowerCAmelCase__ : List[str] = file_format
        lowerCAmelCase__ : Any = Spark(
            df=UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase , working_dir=UpperCamelCase , **UpperCamelCase , )

    def _lowerCAmelCase ( self : int ) -> int:
        """Materialise (or stream) the dataset for the configured split."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        # Force a re-download when the cache must be ignored.
        lowerCAmelCase__ : List[str] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=UpperCamelCase , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
| 0
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
# TODO Update this
snake_case__ = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase ( snake_case__ ):
    """Configuration class mirroring `transformers`' `EsmConfig`.

    NOTE(review): obfuscated — `__init__` repeats the parameter name `A_`
    for every argument (a SyntaxError) while the body reads the real names
    (`vocab_size`, `hidden_size`, ...), none of which are bound; every
    assignment target is the throwaway `_lowerCamelCase`; `lowercase_` is
    undefined; and three later classes in this file also use the name
    `UpperCamelCase`, shadowing one another. Left byte-identical.
    """

    A_ = 'esm'

    def __init__( self , A_=None , A_=None , A_=None , A_=7_68 , A_=12 , A_=12 , A_=30_72 , A_=0.1 , A_=0.1 , A_=10_26 , A_=0.02 , A_=1E-1_2 , A_="absolute" , A_=True , A_=None , A_=False , A_=False , A_=None , A_=None , **A_ , ) -> Union[str, Any]:
        """Store ESM hyper-parameters; build the folding sub-config if requested."""
        super().__init__(pad_token_id=lowercase_ , mask_token_id=lowercase_ , **lowercase_ )
        _lowerCamelCase = vocab_size
        _lowerCamelCase = hidden_size
        _lowerCamelCase = num_hidden_layers
        _lowerCamelCase = num_attention_heads
        _lowerCamelCase = intermediate_size
        _lowerCamelCase = hidden_dropout_prob
        _lowerCamelCase = attention_probs_dropout_prob
        _lowerCamelCase = max_position_embeddings
        _lowerCamelCase = initializer_range
        _lowerCamelCase = layer_norm_eps
        _lowerCamelCase = position_embedding_type
        _lowerCamelCase = use_cache
        _lowerCamelCase = emb_layer_norm_before
        _lowerCamelCase = token_dropout
        _lowerCamelCase = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''' )
                _lowerCamelCase = EsmFoldConfig()
            elif isinstance(lowercase_ , lowercase_ ):
                _lowerCamelCase = EsmFoldConfig(**lowercase_ )
            _lowerCamelCase = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
                _lowerCamelCase = get_default_vocab_list()
            else:
                _lowerCamelCase = vocab_list
        else:
            # Non-folding models carry no folding config or vocab list.
            _lowerCamelCase = None
            _lowerCamelCase = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , lowercase_ ):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )

    def UpperCamelCase_ ( self ) -> int:
        """Serialise to a dict, expanding the nested esmfold config."""
        _lowerCamelCase = super().to_dict()
        if isinstance(self.esmfold_config , lowercase_ ):
            _lowerCamelCase = self.esmfold_config.to_dict()
        return output
# NOTE(review): obfuscated copy of `EsmFoldConfig`. The fields below lack
# type annotations, so @dataclass records no fields, and all share the name
# `A_` — only the last assignment survives. The two methods also share one
# name (originally presumably `__post_init__` and `to_dict`), so the first
# is shadowed, and they read `self.trunk` / `output` which are never bound.
# Left byte-identical.
@dataclass
class UpperCamelCase :
    A_ = None
    A_ = True
    A_ = False
    A_ = False
    A_ = False
    A_ = 0
    A_ = True
    A_ = False
    A_ = 128
    A_ = None

    def UpperCamelCase_ ( self ) -> int:
        """Normalise `trunk` into a TrunkConfig instance."""
        if self.trunk is None:
            _lowerCamelCase = TrunkConfig()
        elif isinstance(self.trunk , lowercase_ ):
            _lowerCamelCase = TrunkConfig(**self.trunk )

    def UpperCamelCase_ ( self ) -> List[str]:
        """Serialise to a dict, expanding the nested trunk config."""
        _lowerCamelCase = asdict(self )
        _lowerCamelCase = self.trunk.to_dict()
        return output
# NOTE(review): obfuscated copy of ESMFold's `TrunkConfig`. As above, the
# un-annotated `A_` fields are not dataclass fields and only the last value
# survives. Also note the two `x % x != 0` checks below compare a value
# against itself (always 0, so they can never raise) — presumably meant to
# use `sequence_head_width` / `pairwise_head_width`; this quirk appears in
# upstream transformers as well, so it is preserved. Left byte-identical.
@dataclass
class UpperCamelCase :
    A_ = 48
    A_ = 1_024
    A_ = 128
    A_ = 32
    A_ = 32
    A_ = 32
    A_ = 0
    A_ = 0
    A_ = False
    A_ = 4
    A_ = 128
    A_ = None

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        """Validate head/state dimensions and build the structure-module config."""
        if self.structure_module is None:
            _lowerCamelCase = StructureModuleConfig()
        elif isinstance(self.structure_module , lowercase_ ):
            _lowerCamelCase = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F'`max_recycles` should be positive, got {self.max_recycles}.' )
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            # Tautology — see class note.
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'''
                F' {self.sequence_state_dim} and {self.sequence_state_dim}.' )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            # Tautology — see class note.
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'''
                F' {self.pairwise_state_dim} and {self.pairwise_state_dim}.' )
        _lowerCamelCase = self.sequence_state_dim // self.sequence_head_width
        _lowerCamelCase = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
                F' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
                F' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
        if self.dropout >= 0.4:
            raise ValueError(F'`dropout` should not be greater than 0.4, got {self.dropout}.' )

    def UpperCamelCase_ ( self ) -> int:
        """Serialise to a dict, expanding the nested structure-module config."""
        _lowerCamelCase = asdict(self )
        _lowerCamelCase = self.structure_module.to_dict()
        return output
# NOTE(review): obfuscated copy of ESMFold's `StructureModuleConfig`. The
# un-annotated `A_` "fields" are plain class attributes (only the last value,
# 1E5, survives), so @dataclass records no fields here and `asdict` returns
# an empty mapping. This class also shadows the two `UpperCamelCase` classes
# above. Left byte-identical.
@dataclass
class UpperCamelCase :
    A_ = 384
    A_ = 128
    A_ = 16
    A_ = 128
    A_ = 12
    A_ = 4
    A_ = 8
    A_ = 0.1
    A_ = 8
    A_ = 1
    A_ = 2
    A_ = 7
    A_ = 10
    A_ = 1E-8
    A_ = 1E5

    def UpperCamelCase_ ( self ) -> Any:
        """Serialise the dataclass fields to a plain dict."""
        return asdict(self )
def __magic_name__( ) -> Optional[Any]:
    """Return the default ESM-2 token vocabulary as a tuple of strings.

    Order matters: four special tokens, the 25 amino-acid / ambiguity
    letters, gap characters, then the null and mask tokens (33 entries).
    """
    special_tokens = ["<cls>", "<pad>", "<eos>", "<unk>"]
    residue_tokens = list("LAGVSERTIDPKQNFYMHWCXBUZO")
    trailing_tokens = [".", "-", "<null_1>", "<mask>"]
    return tuple(special_tokens + residue_tokens + trailing_tokens)
| 704
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the (deprecated) Trajectory Transformer.  Fixes
# vs. the original: the module map and the torch symbol list were bound to
# one throwaway name (the list clobbered the map), `_LazyModule` was called
# with an undefined `_import_structure`, and the lazy module was never
# installed into `sys.modules`.
_import_structure = {
    'configuration_trajectory_transformer': [
        'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TrajectoryTransformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only registered when torch is installed.
    _import_structure['modeling_trajectory_transformer'] = [
        'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrajectoryTransformerModel',
        'TrajectoryTransformerPreTrainedModel',
        'load_tf_weights_in_trajectory_transformer',
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )
else:
    import sys

    # Replace this module with a lazy proxy that resolves attributes from
    # _import_structure on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
| 0
|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __a ( unittest.TestCase ):
    """Pipeline tests for the ``audio-classification`` task.

    The duplicate attribute/method names of the previous version meant only the
    last ``def`` survived; names are restored so the pipeline-test mixin can
    discover ``get_test_pipeline``/``run_pipeline_test`` and unittest can
    discover the ``test_*`` methods.
    """

    # Model mappings consumed by the pipeline-test framework to pick tiny models.
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline plus a pair of raw-waveform examples for it."""
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34_000,))
        audio2 = np.zeros((14_000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"
        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        # Two accepted score orderings: logits vary slightly between torch versions.
        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"
        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
| 600
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
# Non-default values for (nearly) every common `PretrainedConfig` kwarg.
# Renamed from the unreadable `UpperCamelCase__`: the tests below reference it
# as `config_common_kwargs`, so the old name made them fail with NameError.
# Each value deliberately differs from the `PretrainedConfig` default so the
# completeness test can detect accidentally-defaulted entries.
config_common_kwargs = {
    'return_dict': False,
    'output_hidden_states': True,
    'output_attentions': True,
    'torchscript': True,
    'torch_dtype': 'float16',
    'use_bfloat16': True,
    'tf_legacy_loss': True,
    'pruned_heads': {'a': 1},
    'tie_word_embeddings': False,
    'is_decoder': True,
    'cross_attention_hidden_size': 128,
    'add_cross_attention': True,
    'tie_encoder_decoder': True,
    'max_length': 50,
    'min_length': 3,
    'do_sample': True,
    'early_stopping': True,
    'num_beams': 3,
    'num_beam_groups': 3,
    'diversity_penalty': 0.5,
    'temperature': 2.0,
    'top_k': 10,
    'top_p': 0.7,
    'typical_p': 0.2,
    'repetition_penalty': 0.8,
    'length_penalty': 0.8,
    'no_repeat_ngram_size': 5,
    'encoder_no_repeat_ngram_size': 5,
    'bad_words_ids': [1, 2, 3],
    'num_return_sequences': 3,
    'chunk_size_feed_forward': 5,
    'output_scores': True,
    'return_dict_in_generate': True,
    'forced_bos_token_id': 2,
    'forced_eos_token_id': 3,
    'remove_invalid_values': True,
    'architectures': ['BertModel'],
    'finetuning_task': 'translation',
    'id2label': {0: 'label'},
    'label2id': {'label': '0'},
    'tokenizer_class': 'BertTokenizerFast',
    'prefix': 'prefix',
    'bos_token_id': 6,
    'pad_token_id': 7,
    'eos_token_id': 8,
    'sep_token_id': 9,
    'decoder_start_token_id': 10,
    'exponential_decay_length_penalty': (5, 1.01),
    'suppress_tokens': [0, 1],
    'begin_suppress_tokens': 2,
    'task_specific_params': {'translation': 'some_params'},
    'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    """Staging tests: configurations round-trip through the Hugging Face Hub.

    Renamed from `a` (it was shadowed by a later class of the same name, so it
    never ran); setUpClass/tearDownClass restored so unittest invokes them.
    """

    @classmethod
    def setUpClass(cls):
        # Authenticate once for every test in the class.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of the repos the tests create.
        try:
            delete_repo(token=cls._token, repo_id='test-config')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-config-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-config')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('test-config', use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-config')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id='test-config', push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token)

        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-config-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-config-org', push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub('test-dynamic-config', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'})

        new_config = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''', trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, 'CustomConfig')
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    """Unit tests for miscellaneous `PretrainedConfig` utilities (renamed from
    the shadowing-prone `a`)."""

    def test_config_from_string(self):
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''')
        self.assertEqual(n_embd, c.n_embd, 'mismatch for key: n_embd')
        self.assertEqual(resid_pdrop, c.resid_pdrop, 'mismatch for key: resid_pdrop')
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, 'mismatch for key: scale_attn_weights')
        self.assertEqual(summary_type, c.summary_type, 'mismatch for key: summary_type')

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to addin config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F''' {', '.join(keys_with_defaults)}.''')

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder')

        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert')
        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json')

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        configuration.configuration_files = ['config.4.0.0.json']

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json'), 'w'))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json'), os.path.join(tmp_dir, 'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = 'hf-internal-testing/test-two-configs'

        import transformers as new_transformers

        # Monkey-patch the version seen by the config loader.
        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` ia not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 110
| 0
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """CLIP preprocessing that keeps gradients flowing.

    The stock CLIP processor converts to PIL and back, which breaks autograd;
    this wrapper performs resize / center-crop / normalize with torchvision
    transforms on tensors instead. Renamed from `_lowercase`: the class below
    instantiates it as `ProcessorGradientFlow`, and its `__init__` previously
    had two parameters with the same name (a SyntaxError).
    """

    def __init__(self, device: str = "cpu", processor_name: str = "openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(processor_name)
        # CLIP's published normalization statistics.
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        """Resize, crop and normalize a batch of image tensors."""
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        """Tokenize `text` and preprocess `images`, returning tensors on `self.device`."""
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    """Edit images by optimizing a shift in VQGAN latent space with a CLIP loss.

    A random vector is added to the encoded latent; at each step the shifted
    latent is decoded and scored by CLIP against positive/negative prompts, and
    the vector is updated by gradient descent. Renamed from `_lowercase` (it
    shadowed the processor class of the same name) and all obfuscated local
    names restored — the previous version referenced undefined identifiers and
    had duplicate parameter names.
    """

    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        """Instantiate with pre-built `vqgan`/`clip` models, or load them from
        `vqgan_config`/`vqgan_checkpoint` and the default CLIP checkpoint."""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        # Shape of the VQGAN latent; used to sample a random starting latent.
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Stitch the PNG frames found in `input_path` into a gif at `output_path`."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            # Hold the first and last frames longer so the loop reads well.
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        """Encode the image at `path` into a VQGAN latent (tensor input TODO)."""
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Decode `self.latent` shifted by `transform_vector` into an image tensor."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        """Summed (optionally weighted) CLIP image-text similarity logits."""
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        """Loss that rises when the image drifts from positive prompts or toward negative ones."""
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            # Neutral term: log(1) == 0 when no negative prompts were given.
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        """Yield intermediate results while optimizing the latent shift vector."""
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        """Start a wandb run recording prompts, hyperparameters and the source image."""
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        """Normalize prompts into {"prompts": [...], "weights": tensor}.

        Accepts a "|"-separated string, a list of strings, "prompt:weight"
        strings, or (prompt, weight) pairs; missing weights default to 1.0.
        """
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Run the full edit loop, optionally showing/saving intermediate frames."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                # Directory exists already: disambiguate with a timestamp.
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
        self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 702
|
import os
from pathlib import Path
def lowercase_ ( ):
"""simple docstring"""
from torch.utils.cpp_extension import load
lowerCamelCase__ : Any = Path(_A ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
lowerCamelCase__ : Optional[int] = [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , _A , with_cuda=_A , extra_include_paths=[str(_A )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 5
| 0
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=None):
    """Build an alpha mask of `size` that ramps to 0 over `overlap_pixels` at each edge.

    Edges named in `remove_borders` ("l", "r", "t", "b") keep full opacity so
    tiles at the image boundary blend only on their interior sides.  Renamed
    from `_SCREAMING_SNAKE_CASE` (the class below calls it by this name) and
    the duplicate parameter names fixed; the mutable default list replaced
    with a None sentinel.
    """
    if remove_borders is None:
        remove_borders = []
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    # Fully opaque core, then a linear ramp down to 0 across the overlap band.
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    """Clamp `n` into the inclusive range [smallest, largest]."""
    return max(smallest, min(n, largest))
def clamp_rect(rect, min_pos, max_pos):
    """Clamp an (x0, y0, x1, y1) rect into the bounds given by `min_pos`/`max_pos` (x, y) pairs."""
    return (
        clamp(rect[0], min_pos[0], max_pos[0]),
        clamp(rect[1], min_pos[1], max_pos[1]),
        clamp(rect[2], min_pos[0], max_pos[0]),
        clamp(rect[3], min_pos[1], max_pos[1]),
    )
def add_overlap_rect(rect, overlap, image_size):
    """Grow an (x0, y0, x1, y1) rect by `overlap` on every side, clamped to `image_size`."""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    """Prepend a slice of the (downscaled) original image to the left of `tile`.

    The extra `original_slice`-wide strip gives the upscaler context from the
    neighboring tile; it is cropped back off by `unsqueeze_tile` afterwards.
    """
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    """Crop off the context strip added by `squeeze_tile` (x4 because the tile was upscaled 4x)."""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    """Return the largest multiple of `d` that is <= `n`."""
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """Tile-based wrapper around `StableDiffusionUpscalePipeline` for large images.

    The image is split into `tile_size` tiles, each tile is upscaled 4x with
    the parent pipeline, and tiles are blended back together with linear-ramp
    alpha masks over their overlap borders.  Renamed from `A` (whose base class
    `__snake_case` was undefined) so `main()` below can construct it.
    """

    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        """Upscale the (x, y) tile and paste it (alpha-blended) into `final_image`."""
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        # Horizontal offset of the context slice taken from the original image.
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        """Upscale `image` 4x tile by tile; `callback` receives progress + partial image."""
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    """Example: tiled 4x upscale of the diffusers logo on CUDA (renamed so the
    `__main__` guard's `main()` call resolves)."""
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        # Report progress and persist the partially assembled result.
        print(f"""progress: {obj["progress"]:.4f}""")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(
        image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback
    )
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 70
|
"""simple docstring"""
from timeit import timeit
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
    """Count the set bits of a non-negative integer.

    Uses Brian Kernighan's trick: ``n & (n - 1)`` clears the lowest set bit,
    so the loop runs once per set bit rather than once per bit position.

    Raises:
        ValueError: if the input is negative.
    """
    if __SCREAMING_SNAKE_CASE < 0:
        raise ValueError("the value of input must not be negative" )
    remaining = __SCREAMING_SNAKE_CASE
    set_bits = 0
    while remaining:
        # Drop the lowest set bit and count it.
        remaining &= remaining - 1
        set_bits += 1
    return set_bits
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
    """Count the set bits of a non-negative integer.

    Examines one bit per iteration: adds the parity of the low bit
    (``n % 2``) to the total, then shifts the value right.

    Raises:
        ValueError: if the input is negative.
    """
    if __SCREAMING_SNAKE_CASE < 0:
        raise ValueError("the value of input must not be negative" )
    remaining = __SCREAMING_SNAKE_CASE
    total = 0
    while remaining:
        # For non-negative ints, remaining % 2 is exactly the low bit.
        total += remaining % 2
        remaining >>= 1
    return total
def a__ ( ) -> None:
    """Benchmark the two popcount implementations with ``timeit``.

    For several sample inputs, prints each function's result and its timing,
    importing this module as ``z`` inside the timeit setup so the snippets can
    reach the functions.

    NOTE(review): identifiers are mangled — the setup string and the timings
    are bound to ``__lowerCAmelCase`` but read as ``timing``, and the called
    names (``get_set_bits_count_using_modulo_operator``,
    ``get_set_bits_count_using_brian_kernighans_algorithm``) are not defined in
    this chunk (the implementations above are both named ``a__``). Confirm the
    real bindings before running.
    """
    def do_benchmark(__SCREAMING_SNAKE_CASE ) -> None:
        # Make this module's functions visible inside the timeit snippets.
        __lowerCAmelCase: List[Any] = "import __main__ as z"
        print(F"Benchmark when {number = }:" )
        print(F"{get_set_bits_count_using_modulo_operator(__SCREAMING_SNAKE_CASE ) = }" )
        __lowerCAmelCase: Optional[int] = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=__SCREAMING_SNAKE_CASE )
        print(F"timeit() runs in {timing} seconds" )
        print(F"{get_set_bits_count_using_brian_kernighans_algorithm(__SCREAMING_SNAKE_CASE ) = }" )
        __lowerCAmelCase: Optional[int] = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=__SCREAMING_SNAKE_CASE , )
        print(F"timeit() runs in {timing} seconds" )
    # Sample inputs; 0 exercises the empty (no set bits) case.
    for number in (2_5, 3_7, 5_8, 0):
        do_benchmark(__SCREAMING_SNAKE_CASE )
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `benchmark` is not defined in this chunk — the benchmark
    # routine above is named `a__`; confirm the intended callee.
    benchmark()
| 346
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class snake_case ( unittest.TestCase ):
    """Configuration holder for LayoutLMv3 image-processor tests.

    Stores the knobs the test suite needs (batch/image geometry, the resize
    flag, the OCR flag) and exposes them as the kwargs dict used to build the
    image processor under test.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Fix: the original declared nine parameters all named `UpperCamelCase`
        # (duplicate parameter names are a SyntaxError) and never bound the
        # attributes it assigns; parameters now match the attribute names and
        # keep the original positional order and defaults.
        # `size` falls back to the processor's default 18x18 target.
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.apply_ocr = apply_ocr

    def snake_case(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class snake_case ( lowercase , unittest.TestCase ):
    """Tests for LayoutLMvaImageProcessor: resizing to (C, H, W) and optional
    Tesseract OCR that returns words plus bounding boxes.

    NOTE(review): every test method below is named ``snake_case``, so later
    definitions shadow earlier ones at class-creation time, and results are
    bound to ``lowerCamelCase_`` but read back via other names
    (``image_processor``, ``self.image_processor_tester``) — identifiers are
    mangled; verify against the deobfuscated original before trusting names.
    """
    _lowerCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def snake_case ( self ):
        """Create the shared tester helper (setUp)."""
        lowerCamelCase_ = LayoutLMvaImageProcessingTester(self )
    @property
    def snake_case ( self ):
        """Kwargs used to build the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def snake_case ( self ):
        """The processor exposes the expected configuration attributes."""
        lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCamelCase , "do_resize" ) )
        self.assertTrue(hasattr(UpperCamelCase , "size" ) )
        self.assertTrue(hasattr(UpperCamelCase , "apply_ocr" ) )
    def snake_case ( self ):
        """``from_dict`` honors the default size and a ``size=42`` override."""
        lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    def snake_case ( self ):
        """Intentionally empty placeholder in the original."""
        pass
    def snake_case ( self ):
        """PIL inputs: single and batched images come back resized to (C, H, W),
        and OCR output (words, boxes) is attached."""
        # Initialize image_processing
        lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase , Image.Image )
        # Test not batched input
        lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        self.assertIsInstance(encoding.words , UpperCamelCase )
        self.assertIsInstance(encoding.boxes , UpperCamelCase )
        # Test batched
        lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def snake_case ( self ):
        """NumPy inputs: same shape checks as the PIL case."""
        # Initialize image_processing
        lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase , np.ndarray )
        # Test not batched input
        lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def snake_case ( self ):
        """PyTorch tensor inputs: same shape checks as the PIL case."""
        # Initialize image_processing
        lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase , torch.Tensor )
        # Test not batched input
        lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def snake_case ( self ):
        """Integration test against a real document image: with apply_ocr=True
        the encoding carries words/boxes matching a Tesseract 4.1.1 reference;
        with apply_ocr=False only pixel values are produced."""
        # with apply_OCR = True
        lowerCamelCase_ = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        lowerCamelCase_ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
        lowerCamelCase_ = Image.open(ds[0]["file"] ).convert("RGB" )
        lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        lowerCamelCase_ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
        lowerCamelCase_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 
436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , UpperCamelCase )
        self.assertListEqual(encoding.boxes , UpperCamelCase )
        # with apply_OCR = False
        lowerCamelCase_ = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase )
        lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 445
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure for the OWL-ViT model package.
#
# Fixes over the original: the structure dict was bound to a throwaway name
# (`a_`) while `_import_structure` was referenced undefined below; the
# optional-dependency symbol lists were assigned to `a_` (the second
# assignment overwriting the first) instead of being registered; and the lazy
# module proxy was bound to `a_` instead of replacing this module in
# `sys.modules`, so lazy loading never took effect.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

# Vision-only objects are registered only when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

# Torch-backed modeling objects are registered only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; guarded by the same
    # availability checks as the lazy structure above.
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 445
| 1
|
def a__(arr, n, r, index, data, i):
    """Recursively print every r-element combination of ``arr[0:n]``.

    Fixes over the original: the six parameters were all named ``snake_case__``
    (duplicate parameter names are a SyntaxError) and the recursion called an
    undefined name ``combination_util``; parameters now match the names the
    body reads and the recursion is self-referential.

    Args:
        arr:   source values.
        n:     number of usable elements in ``arr``.
        r:     combination size.
        index: next free slot in ``data``.
        data:  scratch list of length ``r`` holding the current partial combination.
        i:     index of the next candidate element in ``arr``.
    """
    # A full combination has been assembled — print it.
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    a__(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    a__(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def _combination_util(arr, n, r, index, data, i):
    """Recursive helper: fill ``data`` and print each completed combination."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    if i >= n:
        return
    # Include arr[i] at data[index] ...
    data[index] = arr[i]
    _combination_util(arr, n, r, index + 1, data, i + 1)
    # ... or skip it (index unchanged, move to the next candidate).
    _combination_util(arr, n, r, index, data, i + 1)


def a__(arr, n, r):
    """Print all combinations of size ``r`` from the first ``n`` elements of ``arr``.

    Fixes over the original: the three parameters were all named
    ``snake_case__`` (duplicate parameter names are a SyntaxError) and the body
    called an undefined ``combination_util``; a private helper with the same
    recursion now accompanies the entry point so the block is self-contained.
    """
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    _combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
    # Driver code to check the function above
    SCREAMING_SNAKE_CASE__ : Optional[int] = [10, 20, 30, 40, 50]
    # NOTE(review): neither `print_combination` nor `arr` is defined in this
    # chunk (the printer above is named `a__` and the list is bound to
    # `SCREAMING_SNAKE_CASE__`); confirm the intended names.
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 643
|
def a__(input_a, input_b):
    """Return the XNOR of two bits: 1 when the inputs are equal, else 0.

    Fixes over the original: both parameters were named ``snake_case__``
    (duplicate parameter names are a SyntaxError) and the body compared
    ``input_a == input_a`` — a variable with itself — so the gate always
    returned 1. The comparison now uses both inputs.
    """
    return 1 if input_a == input_b else 0
def a__ ( ):
    """Exhaustively verify the XNOR gate against its four-row truth table."""
    truth_table = {(0, 0): 1, (0, 1): 0, (1, 0): 0, (1, 1): 1}
    for (left, right), expected in truth_table.items():
        assert xnor_gate(left , right ) == expected
if __name__ == "__main__":
    # NOTE(review): `xnor_gate` is not defined in this chunk — the gate above
    # is named `a__`; confirm the intended callee.
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 643
| 1
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Markers wrapped around lines the converter cannot translate automatically,
# and the tfds identifiers whose presence triggers that highlighting.
# NOTE(review): all four constants below rebind the same mangled name
# `__lowerCamelCase`, so each assignment clobbers the previous one — in the
# original these are four distinct constants (highlight pre/post markers,
# TO_HIGHLIGHT, TO_CONVERT); confirm the real names.
__lowerCamelCase = '<<<<<<< This should probably be modified because it mentions: '
__lowerCamelCase = '=======\n>>>>>>>\n'
# tfds constructs with no automatic datasets equivalent — flagged for a human.
__lowerCamelCase = [
    'TextEncoderConfig',
    'ByteTextEncoder',
    'SubwordTextEncoder',
    'encoder_config',
    'maybe_build_from_corpus',
    'manual_dir',
]
# Regex rewrites applied to each source line, tfds -> datasets.
__lowerCamelCase = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'tfds\.core', R'datasets'),
    (R'tf\.io\.gfile\.GFile', R'open'),
    (R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
    (R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
    (R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
    (R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
    (R'tfds\.features\.FeaturesDict\(', R'dict('),
    (R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
    (R'tfds\.', R'datasets.'),
    (R'dl_manager\.manual_dir', R'self.config.data_dir'),
    (R'self\.builder_config', R'self.config'),
]
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE ) -> int:
    """Factory for the `datasets-cli convert` subcommand.

    NOTE(review): the body reads `args` while the parameter is named
    `_SCREAMING_SNAKE_CASE`, and it returns `ConvertCommand` while the class
    below is named `_UpperCamelCase` — identifiers are mangled; confirm the
    real names before use.
    """
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class _UpperCamelCase( __UpperCAmelCase ):
    """`datasets-cli convert`: rewrite TensorFlow Datasets scripts into
    HuggingFace Datasets scripts using the regex table defined above.

    NOTE(review): identifiers are mangled throughout — results are bound to
    `_UpperCAmelCase` but read via other names (`parser`, `train_parser`,
    `match`, `out_line`, ...), and both the argument registrar and the run
    method are named `a__` (the second shadows the first at class-creation
    time). Treat names as indicative only until deobfuscated.
    """
    @staticmethod
    def a__ ( _lowerCamelCase : List[str] ):
        """Register the `convert` subparser and its two path arguments."""
        _UpperCAmelCase = parser.add_parser(
            "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
        train_parser.add_argument(
            "--tfds_path" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
        train_parser.add_argument(
            "--datasets_directory" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to the HuggingFace Datasets folder." )
        train_parser.set_defaults(func=UpperCAmelCase_ )
    def __init__( self : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , *_lowerCamelCase : Tuple ):
        """Store the source path, target directory and a dedicated CLI logger."""
        _UpperCAmelCase = get_logger("datasets-cli/converting" )
        _UpperCAmelCase = tfds_path
        _UpperCAmelCase = datasets_directory
    def a__ ( self : List[str] ):
        """Convert every eligible .py file under --tfds_path.

        Builder scripts each get their own output directory; shared utility
        files are copied next to the builders that import them; files touching
        constructs in TO_HIGHLIGHT are collected for manual follow-up.
        """
        # Resolve the input: a directory of scripts or a single script file.
        if os.path.isdir(self._tfds_path ):
            _UpperCAmelCase = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            _UpperCAmelCase = os.path.dirname(self._tfds_path )
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
        _UpperCAmelCase = os.path.abspath(self._datasets_directory )
        self._logger.info(f"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
        # Bookkeeping: shared utils, files needing a human pass, and which
        # output directory each imported util belongs to.
        _UpperCAmelCase = []
        _UpperCAmelCase = []
        _UpperCAmelCase = {}
        if os.path.isdir(self._tfds_path ):
            _UpperCAmelCase = os.listdir(UpperCAmelCase_ )
        else:
            _UpperCAmelCase = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f"""Looking at file {f_name}""" )
            _UpperCAmelCase = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
            _UpperCAmelCase = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
            # Only convert real Python source files.
            if not os.path.isfile(UpperCAmelCase_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file" )
                continue
            with open(UpperCAmelCase_ , encoding="utf-8" ) as f:
                _UpperCAmelCase = f.readlines()
            _UpperCAmelCase = []
            _UpperCAmelCase = False
            _UpperCAmelCase = False
            _UpperCAmelCase = []
            # Line-by-line rewrite: drop tf-only lines, translate imports,
            # then apply the TO_CONVERT regex table.
            for line in lines:
                _UpperCAmelCase = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    _UpperCAmelCase = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    _UpperCAmelCase = ""
                    continue
                elif "from absl import logging" in out_line:
                    _UpperCAmelCase = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    _UpperCAmelCase = out_line.replace("getLogger" , "get_logger" )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Keep the original line but fence it with conflict markers
                    # so a human reviews it.
                    _UpperCAmelCase = True
                    _UpperCAmelCase = list(filter(lambda _lowerCamelCase : e in out_line , UpperCAmelCase_ ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(UpperCAmelCase_ ) + "\n" )
                    out_lines.append(UpperCAmelCase_ )
                    out_lines.append(UpperCAmelCase_ )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        _UpperCAmelCase = re.sub(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    _UpperCAmelCase = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , UpperCAmelCase_ )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
                    _UpperCAmelCase = "from . import " + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"""Error converting {out_line.strip()}""" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    _UpperCAmelCase = True
                out_lines.append(UpperCAmelCase_ )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                _UpperCAmelCase = f_name.replace(".py" , "" )
                _UpperCAmelCase = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
                _UpperCAmelCase = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
                os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
                self._logger.info(f"""Adding directory {output_dir}""" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(UpperCAmelCase_ )
            if needs_manual_update:
                with_manual_update.append(UpperCAmelCase_ )
            with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
                f.writelines(UpperCAmelCase_ )
            self._logger.info(f"""Converted in {output_file}""" )
        # Copy shared utility files next to the builder directories that import them.
        for utils_file in utils_files:
            try:
                _UpperCAmelCase = os.path.basename(UpperCAmelCase_ )
                _UpperCAmelCase = imports_to_builder_map[f_name.replace(".py" , "" )]
                self._logger.info(f"""Moving {dest_folder} to {utils_file}""" )
                shutil.copy(UpperCAmelCase_ , UpperCAmelCase_ )
            except KeyError:
                self._logger.error(f"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 711
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
# Also enforce the datasets version required by this example.
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
# NOTE(review): the module logger is bound to the mangled name `__lowerCamelCase`
# but later code reads `logger`; confirm the real binding.
__lowerCamelCase = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase:
    """CLI arguments selecting and preprocessing the (TabFact) dataset.

    NOTE(review): every field below rebinds the same mangled name `__A`, so at
    class-creation time only the last assignment survives, and several field
    defaults reference `SCREAMING_SNAKE_CASE`, which is undefined in this
    chunk (presumably a mangled `None`/`False`). In the original each field
    has its own name (dataset_name, dataset_config_name, max_seq_length,
    overwrite_cache, pad_to_max_length, max_train_samples, max_eval_samples,
    max_predict_samples, train_file, validation_file, test_file) — confirm.
    """
    # Hub dataset name.
    __A: Optional[str] = field(
        default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
    # Hub dataset configuration.
    __A: Optional[str] = field(
        default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
    # Tokenizer truncation/padding length.
    __A: int = field(
        default=10_24 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    __A: bool = field(
        default=SCREAMING_SNAKE_CASE , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    __A: bool = field(
        default=SCREAMING_SNAKE_CASE , metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        } , )
    # Optional caps on the number of train / eval / predict examples (debugging).
    __A: Optional[int] = field(
        default=SCREAMING_SNAKE_CASE , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    __A: Optional[int] = field(
        default=SCREAMING_SNAKE_CASE , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    __A: Optional[int] = field(
        default=SCREAMING_SNAKE_CASE , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        } , )
    # Local data files (used when no Hub dataset name is given).
    __A: Optional[str] = field(
        default=SCREAMING_SNAKE_CASE , metadata={"""help""": """A csv or a json file containing the training data."""} )
    __A: Optional[str] = field(
        default=SCREAMING_SNAKE_CASE , metadata={"""help""": """A csv or a json file containing the validation data."""} )
    __A: Optional[str] = field(default=SCREAMING_SNAKE_CASE , metadata={"""help""": """A csv or a json file containing the test data."""} )
    def a__ ( self : List[str] ):
        """Validate that either a Hub dataset or matching train/validation
        csv/json files were provided (``__post_init__`` in the original)."""
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." )
        else:
            # Both files must be csv or json, with matching extensions.
            _UpperCAmelCase : Union[str, Any] = self.train_file.split("." )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            _UpperCAmelCase : Optional[Any] = self.validation_file.split("." )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _UpperCamelCase:
    """CLI arguments selecting the pretrained model/config/tokenizer.

    NOTE(review): as in the dataclass above, every field rebinds the mangled
    name `__A` and several defaults reference the undefined
    `SCREAMING_SNAKE_CASE`; original field names are model_name_or_path,
    config_name, tokenizer_name, cache_dir, use_fast_tokenizer,
    model_revision, use_auth_token — confirm after deobfuscation.
    """
    # Model identifier or local path (required in the original).
    __A: str = field(
        default=SCREAMING_SNAKE_CASE , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    __A: Optional[str] = field(
        default=SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    __A: Optional[str] = field(
        default=SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    __A: Optional[str] = field(
        default=SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    __A: bool = field(
        default=SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    # Git revision of the model to load.
    __A: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    __A: bool = field(
        default=SCREAMING_SNAKE_CASE , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
def lowerCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Any = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
_UpperCAmelCase : List[Any] = training_args.get_process_log_level()
logger.setLevel(_SCREAMING_SNAKE_CASE )
datasets.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_UpperCAmelCase : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_UpperCAmelCase : Any = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_UpperCAmelCase : Optional[int] = data_args.train_file.split("." )[-1]
_UpperCAmelCase : Optional[Any] = data_args.test_file.split("." )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_UpperCAmelCase : Tuple = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`." )
for key in data_files.keys():
logger.info(F"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith(".csv" ):
# Loading a dataset from local csv files
_UpperCAmelCase : Tuple = load_dataset("csv" , data_files=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_UpperCAmelCase : List[str] = load_dataset("json" , data_files=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_UpperCAmelCase : Tuple = raw_datasets["train"].features["label"].names
_UpperCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_UpperCAmelCase : List[Any] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase : List[Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_UpperCAmelCase : Optional[int] = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_UpperCAmelCase : Union[str, Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_UpperCAmelCase : Tuple = {"Refused": 0, "Entailed": 1}
_UpperCAmelCase : Union[str, Any] = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
_UpperCAmelCase : Dict = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_SCREAMING_SNAKE_CASE ):
    """Tokenize one batch of TabFact examples (statement paired with a linearised table).

    NOTE(review): an automated rename destroyed the local bindings in this
    function — every assignment targets ``_UpperCAmelCase``, yet the code reads
    ``_table_text``, ``_table_content``, ``_table_pd``, ``examples`` and
    ``result``, none of which are defined here.  Restore the upstream variable
    names before running.
    """
    # Tokenize the texts
    def _convert_table_text_to_pandas(_SCREAMING_SNAKE_CASE ):
        # Rows are '\n'-separated and cells '#'-separated; the first row is the header.
        _UpperCAmelCase : Union[str, Any] = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )]
        _UpperCAmelCase : Dict = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
        return _table_pd

    _UpperCAmelCase : List[str] = examples["statement"]
    _UpperCAmelCase : int = list(map(_convert_table_text_to_pandas , examples["table_text"] ) )
    _UpperCAmelCase : Dict = tokenizer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
    _UpperCAmelCase : List[str] = examples["label"]
    return result
with training_args.main_process_first(desc="dataset map pre-processing" ):
_UpperCAmelCase : Any = raw_datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_UpperCAmelCase : Tuple = raw_datasets["train"]
if data_args.max_train_samples is not None:
_UpperCAmelCase : Any = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_UpperCAmelCase : Tuple = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_UpperCAmelCase : Optional[int] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset" )
_UpperCAmelCase : List[str] = raw_datasets["test"]
if data_args.max_predict_samples is not None:
_UpperCAmelCase : str = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_SCREAMING_SNAKE_CASE ) ) , 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_SCREAMING_SNAKE_CASE ):
    """Return accuracy for an ``EvalPrediction`` (argmax over class logits).

    NOTE(review): renamed locals — the body reads ``p`` and ``preds``, which
    should be the parameter and the first assignment's target respectively;
    confirm against the upstream TabFact example before executing.
    """
    _UpperCAmelCase : Dict = p.predictions[0] if isinstance(p.predictions , _SCREAMING_SNAKE_CASE ) else p.predictions
    _UpperCAmelCase : Tuple = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 )
    return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_UpperCAmelCase : List[str] = default_data_collator
elif training_args.fpaa:
_UpperCAmelCase : Dict = DataCollatorWithPadding(_SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 )
else:
_UpperCAmelCase : List[Any] = None
# Initialize our Trainer
_UpperCAmelCase : Union[str, Any] = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
_UpperCAmelCase : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : int = last_checkpoint
_UpperCAmelCase : List[Any] = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Union[str, Any] = train_result.metrics
_UpperCAmelCase : List[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_SCREAMING_SNAKE_CASE )
)
_UpperCAmelCase : Tuple = min(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , _SCREAMING_SNAKE_CASE )
trainer.save_metrics("train" , _SCREAMING_SNAKE_CASE )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCAmelCase : List[str] = trainer.evaluate(eval_dataset=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Any = min(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) )
trainer.log_metrics("eval" , _SCREAMING_SNAKE_CASE )
trainer.save_metrics("eval" , _SCREAMING_SNAKE_CASE )
if training_args.do_predict:
logger.info("*** Predict ***" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
_UpperCAmelCase : int = predict_dataset.remove_columns("label" )
_UpperCAmelCase : Union[str, Any] = trainer.predict(_SCREAMING_SNAKE_CASE , metric_key_prefix="predict" ).predictions
_UpperCAmelCase : List[str] = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 )
_UpperCAmelCase : Union[str, Any] = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" )
if trainer.is_world_process_zero():
with open(_SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Predict Results *****" )
writer.write("index\tprediction\n" )
for index, item in enumerate(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Union[str, Any] = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
_UpperCAmelCase : Tuple = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
trainer.push_to_hub(**_SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE ) -> Dict:
    """TPU multiprocessing entry point (for ``xla_spawn.py``); the positional
    process-index argument is accepted but unused."""
    main()


if __name__ == "__main__":
    main()
| 328
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
# Make all torch / CUDA kernels deterministic so the pinned output slices below are stable.
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
    """Fast unit tests for ``StableDiffusionUpscalePipeline`` built from tiny dummy components.

    NOTE(review): an automated rename collapsed every method onto the single
    name ``lowerCAmelCase`` (so each later ``def`` shadows the previous one)
    and destroyed local variable names — the bodies reference ``batch_size``,
    ``image``, ``sd_pipe`` etc. that are never bound, and
    ``self.dummy_cond_unet_upscale`` / ``self.dummy_vae`` /
    ``self.dummy_text_encoder`` / ``self.dummy_image`` no longer resolve to the
    (renamed) properties below.  Restore upstream names before executing.
    """

    def lowerCAmelCase ( self : Union[str, Any] ):
        '''Originally ``tearDown``: collect garbage and release cached CUDA memory.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def lowerCAmelCase ( self : Tuple ):
        '''Originally ``dummy_image``: a seeded 1x3x32x32 float tensor on the test device.'''
        _A = 1
        _A = 3
        _A = (32, 32)
        _A = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
        return image

    @property
    def lowerCAmelCase ( self : Dict ):
        '''Originally ``dummy_cond_unet_upscale``: a tiny class-conditioned UNet (7 in / 4 out channels).'''
        torch.manual_seed(0 )
        _A = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model

    @property
    def lowerCAmelCase ( self : str ):
        '''Originally ``dummy_vae``: a tiny three-level AutoencoderKL.'''
        torch.manual_seed(0 )
        _A = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model

    @property
    def lowerCAmelCase ( self : List[str] ):
        '''Originally ``dummy_text_encoder``: a tiny CLIP text model.'''
        torch.manual_seed(0 )
        _A = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        return CLIPTextModel(__UpperCAmelCase )

    def lowerCAmelCase ( self : int ):
        '''End-to-end smoke test: output matches a pinned slice, and the tuple
        return (``return_dict=False``) agrees with the dict return.'''
        _A = "cpu" # ensure determinism for the device-dependent torch.Generator
        _A = self.dummy_cond_unet_upscale
        _A = DDPMScheduler()
        _A = DDIMScheduler(prediction_type="v_prediction" )
        _A = self.dummy_vae
        _A = self.dummy_text_encoder
        _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        _A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _A = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("RGB" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        _A = StableDiffusionUpscalePipeline(
            unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
        _A = sd_pipe.to(__UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        _A = "A painting of a squirrel eating a burger"
        _A = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
        _A = sd_pipe(
            [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        _A = output.images
        _A = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
        _A = sd_pipe(
            [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__UpperCAmelCase , )[0]
        _A = image[0, -3:, -3:, -1]
        _A = image_from_tuple[0, -3:, -3:, -1]
        # The upscaler multiplies the low-res edge length by 4.
        _A = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        # Slice pinned from a reference run of this tiny pipeline.
        _A = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def lowerCAmelCase ( self : Tuple ):
        '''Batched prompts and ``num_images_per_prompt=2`` both yield two images.'''
        _A = "cpu" # ensure determinism for the device-dependent torch.Generator
        _A = self.dummy_cond_unet_upscale
        _A = DDPMScheduler()
        _A = DDIMScheduler(prediction_type="v_prediction" )
        _A = self.dummy_vae
        _A = self.dummy_text_encoder
        _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        _A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _A = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("RGB" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        _A = StableDiffusionUpscalePipeline(
            unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
        _A = sd_pipe.to(__UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        _A = "A painting of a squirrel eating a burger"
        _A = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        _A = output.images
        assert image.shape[0] == 2
        _A = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
        _A = sd_pipe(
            [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        _A = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def lowerCAmelCase ( self : Dict ):
        '''fp16 smoke test on GPU: unet/text encoder in half precision (vae kept
        fp32) still produce a 4x-upscaled output of the expected shape.'''
        _A = self.dummy_cond_unet_upscale
        _A = DDPMScheduler()
        _A = DDIMScheduler(prediction_type="v_prediction" )
        _A = self.dummy_vae
        _A = self.dummy_text_encoder
        _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        _A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _A = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("RGB" ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        _A = unet.half()
        _A = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        _A = StableDiffusionUpscalePipeline(
            unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
        _A = sd_pipe.to(__UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        _A = "A painting of a squirrel eating a burger"
        _A = torch.manual_seed(0 )
        _A = sd_pipe(
            [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , ).images
        _A = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests against the real ``stabilityai/stable-diffusion-x4-upscaler`` checkpoint.

    NOTE(review): as in the fast-test class above, all methods were renamed to
    ``lowerCAmelCase`` (shadowing one another) and locals were renamed away, so
    the bodies reference unbound names (``pipe``, ``prompt``, ``generator``,
    ``output`` …).  Restore the upstream names before executing.
    """

    def lowerCAmelCase ( self : Union[str, Any] ):
        '''Originally ``tearDown``: free CUDA memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase ( self : int ):
        '''Full-precision upscale of a cat image matches the stored reference within 1e-3.'''
        _A = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        _A = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy" )
        _A = "stabilityai/stable-diffusion-x4-upscaler"
        _A = StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        pipe.enable_attention_slicing()
        _A = "a cat sitting on a park bench"
        _A = torch.manual_seed(0 )
        _A = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
        _A = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-3

    def lowerCAmelCase ( self : int ):
        '''fp16 variant: output matches the fp16 reference image within a loose 5e-1 tolerance.'''
        _A = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        _A = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy" )
        _A = "stabilityai/stable-diffusion-x4-upscaler"
        _A = StableDiffusionUpscalePipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        pipe.enable_attention_slicing()
        _A = "a cat sitting on a park bench"
        _A = torch.manual_seed(0 )
        _A = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
        _A = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    def lowerCAmelCase ( self : Tuple ):
        '''Memory test: attention slicing + sequential CPU offload keeps peak CUDA allocation under 2.9 GB.'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        _A = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        _A = "stabilityai/stable-diffusion-x4-upscaler"
        _A = StableDiffusionUpscalePipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        _A = "a cat sitting on a park bench"
        _A = torch.manual_seed(0 )
        _A = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="np" , )
        _A = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 330
|
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
# NOTE(review): every constant below binds the same name ``lowerCamelCase_`` —
# each assignment clobbers the previous one, so only the last dict survives at
# import time.  Upstream these were six distinct names (TEST_UNET_CONFIG,
# IMAGENET_64_UNET_CONFIG, LSUN_256_UNET_CONFIG, CD_SCHEDULER_CONFIG,
# CT_IMAGENET_64_SCHEDULER_CONFIG, CT_LSUN_256_SCHEDULER_CONFIG) that the
# ``__main__`` block later in the file selects between; restore them.
# 32x32 test-sized UNet hyper-parameters.
lowerCamelCase_ = {
    '''sample_size''': 32,
    '''in_channels''': 3,
    '''out_channels''': 3,
    '''layers_per_block''': 2,
    '''num_class_embeds''': 10_00,
    '''block_out_channels''': [32, 64],
    '''attention_head_dim''': 8,
    '''down_block_types''': [
        '''ResnetDownsampleBlock2D''',
        '''AttnDownBlock2D''',
    ],
    '''up_block_types''': [
        '''AttnUpBlock2D''',
        '''ResnetUpsampleBlock2D''',
    ],
    '''resnet_time_scale_shift''': '''scale_shift''',
    '''upsample_type''': '''resnet''',
    '''downsample_type''': '''resnet''',
}
# 64x64 class-conditional ImageNet UNet hyper-parameters.
lowerCamelCase_ = {
    '''sample_size''': 64,
    '''in_channels''': 3,
    '''out_channels''': 3,
    '''layers_per_block''': 3,
    '''num_class_embeds''': 10_00,
    '''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
    '''attention_head_dim''': 64,
    '''down_block_types''': [
        '''ResnetDownsampleBlock2D''',
        '''AttnDownBlock2D''',
        '''AttnDownBlock2D''',
        '''AttnDownBlock2D''',
    ],
    '''up_block_types''': [
        '''AttnUpBlock2D''',
        '''AttnUpBlock2D''',
        '''AttnUpBlock2D''',
        '''ResnetUpsampleBlock2D''',
    ],
    '''resnet_time_scale_shift''': '''scale_shift''',
    '''upsample_type''': '''resnet''',
    '''downsample_type''': '''resnet''',
}
# 256x256 unconditional LSUN bedroom/cat UNet hyper-parameters.
lowerCamelCase_ = {
    '''sample_size''': 2_56,
    '''in_channels''': 3,
    '''out_channels''': 3,
    '''layers_per_block''': 2,
    '''num_class_embeds''': None,
    '''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
    '''attention_head_dim''': 64,
    '''down_block_types''': [
        '''ResnetDownsampleBlock2D''',
        '''ResnetDownsampleBlock2D''',
        '''ResnetDownsampleBlock2D''',
        '''AttnDownBlock2D''',
        '''AttnDownBlock2D''',
        '''AttnDownBlock2D''',
    ],
    '''up_block_types''': [
        '''AttnUpBlock2D''',
        '''AttnUpBlock2D''',
        '''AttnUpBlock2D''',
        '''ResnetUpsampleBlock2D''',
        '''ResnetUpsampleBlock2D''',
        '''ResnetUpsampleBlock2D''',
    ],
    '''resnet_time_scale_shift''': '''default''',
    '''upsample_type''': '''resnet''',
    '''downsample_type''': '''resnet''',
}
# Scheduler config for consistency-distillation checkpoints.
lowerCamelCase_ = {
    '''num_train_timesteps''': 40,
    '''sigma_min''': 0.002,
    '''sigma_max''': 80.0,
}
# Scheduler config for consistency-training ImageNet-64 checkpoints.
lowerCamelCase_ = {
    '''num_train_timesteps''': 2_01,
    '''sigma_min''': 0.002,
    '''sigma_max''': 80.0,
}
# Scheduler config for consistency-training LSUN-256 checkpoints.
lowerCamelCase_ = {
    '''num_train_timesteps''': 1_51,
    '''sigma_min''': 0.002,
    '''sigma_max''': 80.0,
}
def __lowercase ( __lowercase ) -> int:
    """Parse a boolean-valued command-line flag (argparse ``type=`` callable).

    Accepts a real ``bool`` (returned unchanged) or a case-insensitive string
    such as ``"yes"``/``"no"``, ``"true"``/``"false"``, ``"t"``/``"f"``,
    ``"y"``/``"n"``, ``"1"``/``"0"``.

    Raises:
        argparse.ArgumentTypeError: if the value is not a recognised boolean.
    """
    # Bug fix: the original evaluated ``isinstance(v, v)`` against the
    # undefined name ``v`` — argparse may hand us an actual bool (the flag's
    # default), which must short-circuit before ``.lower()`` is attempted.
    if isinstance(__lowercase, bool):
        return __lowercase
    if __lowercase.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif __lowercase.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected" )
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> Tuple:
    """Copy one ResNet block's weights from the OpenAI checkpoint layout into diffusers naming.

    NOTE(review): automated renaming gave all five parameters the identical
    name ``__lowercase`` (duplicate parameter names are a SyntaxError) and
    collapsed every assignment target to ``_A``, while the body still reads the
    upstream locals ``checkpoint``, ``old_prefix``, ``has_skip`` and
    ``new_checkpoint``.  The upstream signature is
    ``(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False)``;
    restore it before use.
    """
    _A = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
    _A = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
    _A = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
    _A = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
    _A = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
    _A = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
    _A = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
    _A = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
    _A = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
    _A = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
    # Blocks that change channel count carry an extra skip-connection conv.
    if has_skip:
        _A = checkpoint[F'''{old_prefix}.skip_connection.weight''']
        _A = checkpoint[F'''{old_prefix}.skip_connection.bias''']
    return new_checkpoint
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=None ) -> Union[str, Any]:
    """Copy one attention block's qkv/norm/proj weights into diffusers naming, squeezing 1x1-conv dims.

    NOTE(review): same automated damage as the ResNet converter above — all
    five parameters share the name ``__lowercase`` (SyntaxError) and assignment
    targets were collapsed to ``_A`` while the body reads ``checkpoint`` /
    ``old_prefix`` / ``new_checkpoint``.  Upstream signature:
    ``(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None)``.
    """
    # qkv is stored as one fused 1x1 conv; split into three equal chunks.
    _A , _A , _A = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
    _A , _A , _A = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
    _A = checkpoint[F'''{old_prefix}.norm.weight''']
    _A = checkpoint[F'''{old_prefix}.norm.bias''']
    # Drop the trailing 1x1 spatial dims so the conv weights become linear layers.
    _A = weight_q.squeeze(-1 ).squeeze(-1 )
    _A = bias_q.squeeze(-1 ).squeeze(-1 )
    _A = weight_k.squeeze(-1 ).squeeze(-1 )
    _A = bias_k.squeeze(-1 ).squeeze(-1 )
    _A = weight_v.squeeze(-1 ).squeeze(-1 )
    _A = bias_v.squeeze(-1 ).squeeze(-1 )
    _A = (
        checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
    )
    _A = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
def __lowercase ( __lowercase , __lowercase ) -> Any:
    """Translate a consistency-model ``*.pt`` state dict into ``UNet2DModel`` naming.

    NOTE(review): automated renaming collapsed both parameters onto the same
    identifier ``__lowercase`` (a SyntaxError) and every assignment target onto
    ``_A``, while the body still reads the upstream locals (``checkpoint``,
    ``new_checkpoint``, ``unet_config``, ``current_layer``, ``channels_list`` …)
    and the renamed helpers via their upstream names ``convert_resnet`` /
    ``convert_attention``.  Upstream signature: ``(checkpoint_path, unet_config)``.
    """
    # Load the raw OpenAI checkpoint onto CPU.
    _A = torch.load(__lowercase , map_location="cpu" )
    _A = {}
    # Time-embedding MLP.
    _A = checkpoint["time_embed.0.weight"]
    _A = checkpoint["time_embed.0.bias"]
    _A = checkpoint["time_embed.2.weight"]
    _A = checkpoint["time_embed.2.bias"]
    # Optional class-label embedding.
    if unet_config["num_class_embeds"] is not None:
        _A = checkpoint["label_emb.weight"]
    # Input convolution.
    _A = checkpoint["input_blocks.0.0.weight"]
    _A = checkpoint["input_blocks.0.0.bias"]
    _A = unet_config["down_block_types"]
    _A = unet_config["layers_per_block"]
    _A = unet_config["attention_head_dim"]
    _A = unet_config["block_out_channels"]
    _A = 1
    _A = channels_list[0]
    # Down path: map OpenAI's flat ``input_blocks.N`` onto diffusers' down_blocks.
    for i, layer_type in enumerate(__lowercase ):
        _A = channels_list[i]
        _A = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(__lowercase ):
                _A = F'''down_blocks.{i}.resnets.{j}'''
                _A = F'''input_blocks.{current_layer}.0'''
                # First resnet of a channel-changing block carries a skip conv.
                _A = True if j == 0 and downsample_block_has_skip else False
                _A = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase , has_skip=__lowercase )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(__lowercase ):
                _A = F'''down_blocks.{i}.resnets.{j}'''
                _A = F'''input_blocks.{current_layer}.0'''
                _A = True if j == 0 and downsample_block_has_skip else False
                _A = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase , has_skip=__lowercase )
                _A = F'''down_blocks.{i}.attentions.{j}'''
                _A = F'''input_blocks.{current_layer}.1'''
                _A = convert_attention(
                    __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
                current_layer += 1
        # Every down block except the last ends with a downsampler.
        if i != len(__lowercase ) - 1:
            _A = F'''down_blocks.{i}.downsamplers.0'''
            _A = F'''input_blocks.{current_layer}.0'''
            _A = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase )
            current_layer += 1
        _A = current_channels
    # hardcoded the mid-block for now
    _A = "mid_block.resnets.0"
    _A = "middle_block.0"
    _A = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase )
    _A = "mid_block.attentions.0"
    _A = "middle_block.1"
    _A = convert_attention(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
    _A = "mid_block.resnets.1"
    _A = "middle_block.2"
    _A = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase )
    _A = 0
    _A = unet_config["up_block_types"]
    # Up path: map ``output_blocks.N`` back onto diffusers' up_blocks.
    for i, layer_type in enumerate(__lowercase ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                _A = F'''up_blocks.{i}.resnets.{j}'''
                _A = F'''output_blocks.{current_layer}.0'''
                _A = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase , has_skip=__lowercase )
                current_layer += 1
            if i != len(__lowercase ) - 1:
                _A = F'''up_blocks.{i}.upsamplers.0'''
                _A = F'''output_blocks.{current_layer-1}.1'''
                _A = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                _A = F'''up_blocks.{i}.resnets.{j}'''
                _A = F'''output_blocks.{current_layer}.0'''
                _A = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase , has_skip=__lowercase )
                _A = F'''up_blocks.{i}.attentions.{j}'''
                _A = F'''output_blocks.{current_layer}.1'''
                _A = convert_attention(
                    __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
                current_layer += 1
            if i != len(__lowercase ) - 1:
                _A = F'''up_blocks.{i}.upsamplers.0'''
                _A = F'''output_blocks.{current_layer-1}.2'''
                _A = convert_resnet(__lowercase , __lowercase , __lowercase , __lowercase )
    # Output group-norm + conv.
    _A = checkpoint["out.0.weight"]
    _A = checkpoint["out.0.bias"]
    _A = checkpoint["out.2.weight"]
    _A = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    # CLI entry: convert an OpenAI consistency-model ``unet.pt`` checkpoint
    # into a diffusers ``ConsistencyModelPipeline`` saved at --dump_path.
    # NOTE(review): as written this block cannot run — ``strabool`` and
    # ``con_pt_to_diffuser`` refer to helpers that were renamed to
    # ``__lowercase`` above, the config constants were all collapsed onto the
    # single name ``lowerCamelCase_``, and the locals read below (``parser``,
    # ``args``, ``ckpt_name``, ``unet_config`` …) are never bound.  Restore the
    # upstream names before using this script.
    lowerCamelCase_ = argparse.ArgumentParser()
    parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
    parser.add_argument(
        '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
    )
    parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    lowerCamelCase_ = parser.parse_args()
    lowerCamelCase_ = strabool(args.class_cond)
    lowerCamelCase_ = os.path.basename(args.unet_path)
    print(F"""Checkpoint: {ckpt_name}""")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        lowerCamelCase_ = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        lowerCamelCase_ = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        lowerCamelCase_ = TEST_UNET_CONFIG
    else:
        raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
    if not args.class_cond:
        lowerCamelCase_ = None
    lowerCamelCase_ = con_pt_to_diffuser(args.unet_path, unet_config)
    lowerCamelCase_ = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        lowerCamelCase_ = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        lowerCamelCase_ = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        lowerCamelCase_ = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
    lowerCamelCase_ = CMStochasticIterativeScheduler(**scheduler_config)
    lowerCamelCase_ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 330
| 1
|
def UpperCAmelCase_ ( __UpperCamelCase ):
    """Reverse every word longer than four characters, leaving shorter words intact.

    Words are separated by whitespace; the result is joined with single spaces.

    >>> UpperCAmelCase_("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    # Bug fix: the original referenced the undefined name ``sentence`` and
    # applied len() to the whole argument instead of to each individual word.
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in __UpperCamelCase.split()
    )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: the original called the undefined name ``reverse_long_words``;
    # the function defined above is ``UpperCAmelCase_``.
    print(UpperCAmelCase_("Hey wollef sroirraw"))
| 588
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this config file.
# NOTE(review): both assignments below bind the same name ``lowerCamelCase_`` —
# the pretrained-config archive map clobbers the logger.  Upstream these were
# ``logger`` and ``TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP``; restore before use.
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __a ( __lowerCamelCase ):
    """Configuration class for TrOCR's autoregressive text decoder.

    NOTE(review): automated renaming damaged this block — the base-class name
    ``__lowerCamelCase`` is undefined in this module (the import at the top of
    the file suggests ``PretrainedConfig``; confirm), the three class
    attributes below all share the name ``_A`` so each rebind clobbers the
    previous (upstream: ``model_type``, ``keys_to_ignore_at_inference``,
    ``attribute_map``), and every ``__init__`` parameter is called
    ``_UpperCamelCase`` — duplicate parameter names are a SyntaxError — while
    the body reads the upstream parameter names (``vocab_size``, ``d_model``,
    …).  Restore the upstream identifiers before use.
    """

    _A : List[Any] = "trocr"
    _A : Optional[int] = ["past_key_values"]
    _A : Dict = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__( self : str ,_UpperCamelCase : Dict=5_0_2_6_5 ,_UpperCamelCase : int=1_0_2_4 ,_UpperCamelCase : Union[str, Any]=1_2 ,_UpperCamelCase : Union[str, Any]=1_6 ,_UpperCamelCase : List[Any]=4_0_9_6 ,_UpperCamelCase : str="gelu" ,_UpperCamelCase : Dict=5_1_2 ,_UpperCamelCase : List[Any]=0.1 ,_UpperCamelCase : Dict=0.0 ,_UpperCamelCase : Optional[int]=0.0 ,_UpperCamelCase : Union[str, Any]=2 ,_UpperCamelCase : str=0.02 ,_UpperCamelCase : Any=0.0 ,_UpperCamelCase : Optional[int]=True ,_UpperCamelCase : str=False ,_UpperCamelCase : int=True ,_UpperCamelCase : Tuple=True ,_UpperCamelCase : str=1 ,_UpperCamelCase : Optional[Any]=0 ,_UpperCamelCase : str=2 ,**_UpperCamelCase : Any ,) -> Any:
        '''Store decoder hyper-parameters and forward the special-token ids to the base config.'''
        SCREAMING_SNAKE_CASE__ =vocab_size
        SCREAMING_SNAKE_CASE__ =d_model
        SCREAMING_SNAKE_CASE__ =decoder_layers
        SCREAMING_SNAKE_CASE__ =decoder_attention_heads
        SCREAMING_SNAKE_CASE__ =decoder_ffn_dim
        SCREAMING_SNAKE_CASE__ =activation_function
        SCREAMING_SNAKE_CASE__ =max_position_embeddings
        SCREAMING_SNAKE_CASE__ =dropout
        SCREAMING_SNAKE_CASE__ =attention_dropout
        SCREAMING_SNAKE_CASE__ =activation_dropout
        SCREAMING_SNAKE_CASE__ =init_std
        SCREAMING_SNAKE_CASE__ =decoder_layerdrop
        SCREAMING_SNAKE_CASE__ =use_cache
        SCREAMING_SNAKE_CASE__ =scale_embedding
        SCREAMING_SNAKE_CASE__ =use_learned_position_embeddings
        SCREAMING_SNAKE_CASE__ =layernorm_embedding
        super().__init__(
            pad_token_id=_UpperCamelCase ,bos_token_id=_UpperCamelCase ,eos_token_id=_UpperCamelCase ,decoder_start_token_id=_UpperCamelCase ,**_UpperCamelCase ,)
| 588
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.p3.16xlarge''',
            '''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
        },
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_ddp.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.p3.16xlarge''',
            '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
        },
        {
            '''framework''': '''tensorflow''',
            '''script''': '''run_tf_dist.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.p3.16xlarge''',
            '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
        },
    ] )
class __snake_case (unittest.TestCase ):
    """Release-gate SageMaker multi-node training smoke tests on ml.p3.16xlarge.

    Parameterized over PyTorch DDP, SageMaker smdistributed data-parallel and a
    TF distribution case; each case pins a max runtime, min eval accuracy and
    max eval loss.

    NOTE(review): automated renaming collapsed all method names onto ``__a``
    (later defs shadow earlier ones) and destroyed locals — the bodies read
    ``instance_count``, ``job_name``, ``estimator`` etc. that are never bound,
    and ``A_`` appears both as a parameter and as undefined keyword values.
    Restore the upstream names before executing.
    """

    def __a ( self: List[str] ):
        '''Originally ``setUp``: stage the example script into the test path and sanity-check the env fixture.'''
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding="""utf-8""" , check=A_ , )
        assert hasattr(self , """env""" )

    def __a ( self: Dict , A_: int ):
        '''Build a HuggingFace estimator for ``instance_count`` nodes; uses smdistributed unless the script is ``run_ddp.py``.'''
        __lowerCamelCase = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
        # distributed data settings
        __lowerCamelCase = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=A_ , instance_count=A_ , instance_type=self.instance_type , debugger_hook_config=A_ , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=A_ , py_version="""py36""" , )

    def __a ( self: Union[str, Any] , A_: int ):
        '''Export a finished job's CloudWatch metrics to a CSV next to the test path.'''
        TrainingJobAnalytics(A_ ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )

    @parameterized.expand([(2,)] )
    def __a ( self: int , A_: Any ):
        '''Run training on two instances and assert runtime/accuracy/loss against the pinned results; dumps KPIs to JSON.'''
        # create estimator
        __lowerCamelCase = self.create_estimator(A_ )
        # run training
        estimator.fit()
        # result dataframe
        __lowerCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        __lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        __lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        __lowerCamelCase = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_99_99 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json' , """w""" ) as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , A_ )
| 281
|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Paths to the SentencePiece test fixtures bundled with the repository, plus
# the tensor framework used for return_tensors in the tests below.
# NOTE(review): all three constants share the name ``__magic_name__`` — each
# assignment clobbers the previous; upstream they were ``SAMPLE_VOCAB``,
# ``SAMPLE_BPE_VOCAB`` and ``FRAMEWORK``.  Restore before use.
__magic_name__ : Dict = get_tests_dir('fixtures/test_sentencepiece.model')
__magic_name__ : int = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
__magic_name__ : Any = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __snake_case (lowerCamelCase , unittest.TestCase ):
    """Tokenizer tests for CamemBERT (slow SentencePiece and fast Rust versions).

    NOTE(review): this file is machine-mangled — the four class attributes below
    all share the name ``__a`` (later assignments shadow earlier ones; they were
    presumably ``tokenizer_class`` / ``rust_tokenizer_class`` /
    ``test_rust_tokenizer`` / ``test_sentencepiece``), and several methods pass
    the undefined name ``A_`` where a fixture path or a computed value was
    intended. Confirm against the original test module before relying on it.
    """

    __a = CamembertTokenizer
    __a = CamembertTokenizerFast
    __a = True
    __a = True

    def __a ( self: List[Any] ):
        # setUp: build a slow tokenizer from the SentencePiece fixture and save it
        # to the temp dir used by the common-test machinery.
        super().setUp()
        # We have a SentencePiece fixture for testing
        __lowerCamelCase = CamembertTokenizer(A_ )  # A_ undefined here — presumably SAMPLE_VOCAB; TODO confirm
        tokenizer.save_pretrained(self.tmpdirname )  # `tokenizer` undefined — mangled assignment above

    def __a ( self: Union[str, Any] ):
        # <pad> must map to id 1 and back.
        __lowerCamelCase = """<pad>"""
        __lowerCamelCase = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )

    def __a ( self: Any ):
        # Spot-check the ordered vocabulary: specials at both ends, expected size.
        __lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(A_ ) , 10_04 )

    def __a ( self: Any ):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )

    def __a ( self: Optional[Any] ):
        # Slow (sentencepiece) and fast (Rust) tokenizers must agree on ids;
        # <unk> pieces may differ, hence the token-level comparison caveat below.
        __lowerCamelCase = CamembertTokenizer(A_ )  # A_ undefined — TODO confirm fixture
        tokenizer.save_pretrained(self.tmpdirname )
        __lowerCamelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
        __lowerCamelCase = """I was born in 92000, and this is falsé."""
        __lowerCamelCase = tokenizer.encode(A_ )
        __lowerCamelCase = rust_tokenizer.encode(A_ )
        self.assertListEqual(A_ , A_ )
        __lowerCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
        __lowerCamelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
        self.assertListEqual(A_ , A_ )
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        __lowerCamelCase = tokenizer.convert_ids_to_tokens(A_ )
        __lowerCamelCase = rust_tokenizer.tokenize(A_ )
        self.assertListEqual(A_ , A_ )

    def __a ( self: List[str] ):
        # Cross-check tokenization and encoding between slow and fast tokenizers.
        if not self.test_rust_tokenizer:
            return
        __lowerCamelCase = self.get_tokenizer()
        __lowerCamelCase = self.get_rust_tokenizer()
        __lowerCamelCase = """I was born in 92000, and this is falsé."""
        __lowerCamelCase = tokenizer.tokenize(A_ )
        __lowerCamelCase = rust_tokenizer.tokenize(A_ )
        self.assertListEqual(A_ , A_ )
        __lowerCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
        __lowerCamelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
        self.assertListEqual(A_ , A_ )
        __lowerCamelCase = self.get_rust_tokenizer()
        __lowerCamelCase = tokenizer.encode(A_ )
        __lowerCamelCase = rust_tokenizer.encode(A_ )
        self.assertListEqual(A_ , A_ )

    @slow
    def __a ( self: Optional[Any] ):
        """Integration test: pinned encodings for two French sentences against
        the `camembert-base` checkpoint at a fixed revision."""
        # fmt: off
        __lowerCamelCase = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # camembert is a french model. So we also use french texts.
        __lowerCamelCase = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=A_ , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=A_ , )
| 281
| 1
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
# Optional-dependency guards: PIL/textwrap are only needed for header rendering,
# torch only for patch extraction.
if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # NOTE(review): mangled — this was presumably
    # `is_torch_greater_or_equal_than_1_11 = False`; as written, the sentinel
    # checked by the version guard below is never defined when torch is absent.
    UpperCAmelCase : Any =False

UpperCAmelCase : Tuple =logging.get_logger(__name__)

# Hub repo that hosts the default font used by `render_text`.
UpperCAmelCase : List[Any] ="""ybelkada/fonts"""
def _lowerCAmelCase ():
    """Fail fast with ImportError when an installed torch predates 1.11.0."""
    torch_too_old = is_torch_available() and not is_torch_greater_or_equal_than_1_11
    if not torch_too_old:
        return
    message = (
        f"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
        "Pix2StructImageProcessor. Please upgrade torch."
    )
    raise ImportError(message)
def _lowerCAmelCase (image_tensor , patch_height , patch_width):
    """Extract non-overlapping (patch_height, patch_width) patches from a CHW image tensor.

    Returns a tensor of shape
    ``[1, rows, columns, patch_height * patch_width * channels]``.

    Fixes: the mangled original declared the same name for all three parameters,
    which is a SyntaxError; the body already used ``image_tensor`` /
    ``patch_height`` / ``patch_width``, restored here.
    """
    requires_backends(_lowerCAmelCase , ["torch"])
    _check_torch_version()
    # Add a batch dimension for `unfold`.
    image_tensor = image_tensor.unsqueeze(0)
    # Non-overlapping sliding window: stride equals the patch size.
    patches = torch.nn.functional.unfold(image_tensor , (patch_height, patch_width) , stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0) , image_tensor.size(1) , patch_height , patch_width , -1)
    # Reorder so the flattened per-patch pixels come last, then collapse to a
    # rows x columns grid of flattened patches.
    patches = patches.permute(0 , 4 , 2 , 3 , 1).reshape(
        image_tensor.size(2) // patch_height , image_tensor.size(3) // patch_width , image_tensor.size(1) * patch_height * patch_width , )
    return patches.unsqueeze(0)
def _lowerCAmelCase (text , text_size = 36 , text_color = "black" , background_color = "white" , left_padding = 5 , right_padding = 5 , top_padding = 5 , bottom_padding = 5 , font_bytes = None , font_path = None , ):
    """Render ``text`` into a padded RGB PIL image.

    Fixes: the mangled original declared ten parameters all sharing one name
    (SyntaxError) and passed the wrong names to PIL/hub helpers; parameter names
    restored from the body's usage and the default font repo constant.
    """
    requires_backends(_lowerCAmelCase , "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    # Resolve the font: raw bytes > explicit path > default hub-hosted Arial.
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(UpperCAmelCase , "Arial.TTF")
    font = ImageFont.truetype(font , encoding="UTF-8" , size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB" , (1, 1) , background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0) , wrapped_text , font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB" , (image_width, image_height) , background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding) , text=wrapped_text , fill=text_color , font=font)
    return image
def _lowerCAmelCase (image , header , **kwargs):
    """Render ``header`` text above ``image`` and return the stacked numpy image.

    Both images are rescaled to a common width; extra ``kwargs`` are forwarded
    to the text renderer.

    Fixes: the mangled original declared duplicate parameter names (SyntaxError)
    and forwarded the wrong variables; names restored from the body's usage.
    NOTE(review): `render_text` is referenced by its original name, as in the
    source body — confirm it resolves in the full module.
    """
    requires_backends(_lowerCAmelCase , "vision")
    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header , **kwargs)
    new_width = max(header_image.width , image.width)
    # Scale both parts proportionally to the common width.
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB" , (new_width, new_height + new_header_height) , "white")
    new_image.paste(header_image.resize((new_width, new_header_height)) , (0, 0))
    new_image.paste(image.resize((new_width, new_height)) , (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image , ChannelDimension.LAST)
    return new_image
class _lowercase (BaseImageProcessor ):
    """Pix2Struct-style image processor.

    Flattens images into a fixed-length sequence of patches (each prefixed with
    1-based row/column ids), optionally normalizing first and, for VQA models,
    rendering a header text above the image.

    Fixes vs. the mangled original: the base class was the undefined name
    ``a_`` (restored to ``BaseImageProcessor``); every method declared duplicate
    parameter names (SyntaxError); ``__init__`` assigned to throwaway locals
    instead of ``self`` attributes; parameter/attribute names restored from the
    bodies' own usage.
    """

    model_input_names = ["""flattened_patches"""]

    def __init__( self , do_convert_rgb = True , do_normalize = True , patch_size = None , max_patches = 2048 , is_vqa = False , **kwargs , ):
        """Store preprocessing defaults.

        Args:
            do_convert_rgb: convert inputs to RGB before processing.
            do_normalize: per-image zero-mean/unit-std normalization.
            patch_size: dict with ``height``/``width`` (default 16x16).
            max_patches: pad/truncate the patch sequence to this length.
            is_vqa: when True, a header text is rendered above each image.
        """
        super().__init__(**kwargs )
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches( self , image , max_patches , patch_size , **kwargs ):
        """Resize ``image`` so a whole number of patches fits, extract them, and
        pad the flattened sequence to ``max_patches`` rows.

        Returns a numpy array of shape
        ``[max_patches, 2 + patch_height * patch_width * channels]`` where the
        first two columns are 1-based row/column ids (0 marks padding).
        """
        requires_backends(self.extract_flattened_patches , "torch" )
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image , ChannelDimension.FIRST )
        image = torch.from_numpy(image )

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image )

        # maximize scale s.t. the resized image holds at most `max_patches` patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height ) , max_patches ) , 1 )
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width ) , max_patches ) , 1 )
        resized_height = max(num_feasible_rows * patch_height , 1 )
        resized_width = max(num_feasible_cols * patch_width , 1 )

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=False , antialias=True , ).squeeze(0 )

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image , patch_height , patch_width )

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth] )

        # [rows * columns, 1]
        row_ids = torch.arange(rows ).reshape([rows, 1] ).repeat(1 , columns ).reshape([rows * columns, 1] )
        col_ids = torch.arange(columns ).reshape([1, columns] ).repeat(rows , 1 ).reshape([rows * columns, 1] )

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32 )
        col_ids = col_ids.to(torch.float32 )

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches] , -1 )

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result , [0, 0, 0, max_patches - (rows * columns)] ).float()

        result = to_numpy_array(result )
        return result

    def normalize( self , image , data_format = None , **kwargs ):
        """Zero-mean/unit-std normalize using statistics of the whole image,
        clamping the std so tiny images do not blow up."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32 )

        # take mean across the whole `image`
        mean = np.mean(image )
        std = np.std(image )
        adjusted_stddev = max(std , 1.0 / math.sqrt(np.prod(image.shape ) ) )

        return normalize(image , mean=mean , std=adjusted_stddev , **kwargs )

    def preprocess( self , images , header_text = None , do_convert_rgb = None , do_normalize = None , max_patches = None , patch_size = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """Full preprocessing pipeline: RGB conversion, optional header
        rendering (VQA), normalization, patch extraction and attention masks.

        Returns a :class:`BatchFeature` with ``flattened_patches`` and
        ``attention_mask``.
        """
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format" , None ) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are " )

        images = make_list_of_images(images )

        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models." )

            font_bytes = kwargs.pop("font_bytes" , None )
            font_path = kwargs.pop("font_path" , None )

            if isinstance(header_text , str ):
                header_text = [header_text] * len(images )

            images = [
                render_header(image , header_text[i] , font_bytes=font_bytes , font_path=font_path )
                for i, image in enumerate(images )
            ]

        if do_normalize:
            images = [self.normalize(image=image ) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image , max_patches=max_patches , patch_size=patch_size )
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=return_tensors )
        return encoded_outputs
| 717
|
import sys

# 1000-digit number from Project Euler problem 8, stored as twenty 50-digit
# string literals joined by implicit adjacent-literal concatenation.
UpperCAmelCase : Union[str, Any] =(
    """73167176531330624919225119674426574742355349194934"""
    """96983520312774506326239578318016984801869478851843"""
    """85861560789112949495459501737958331952853208805511"""
    """12540698747158523863050715693290963295227443043557"""
    """66896648950445244523161731856403098711121722383113"""
    """62229893423380308135336276614282806444486645238749"""
    """30358907296290491560440772390713810515859307960866"""
    """70172427121883998797908792274921901699720888093776"""
    """65727333001053367881220235421809751254540594752243"""
    """52584907711670556013604839586446706324415722155397"""
    """53697817977846174064955149290862569321978468622482"""
    """83972241375657056057490261407972968652414535100474"""
    """82166370484403199890008895243450658541227588666881"""
    """16427171479924442928230863465674813919123162824586"""
    """17866458359124566529476545682848912883142607690042"""
    """24219022671055626321111109370544217506941658960408"""
    """07198403850962455444362981230987879927244284909188"""
    """84580156166097919133875499200524063689912560717606"""
    """05886116467109405077541002256983155200055935729725"""
    """71636269561882670428252483600823257530420752963450"""
)
def _lowerCAmelCase (_lowerCAmelCase = N):
UpperCamelCase_ = -sys.maxsize - 1
for i in range(len(_lowerCAmelCase) - 12):
UpperCamelCase_ = 1
for j in range(13):
product *= int(n[i + j])
if product > largest_product:
UpperCamelCase_ = product
return largest_product
if __name__ == "__main__":
print(F"{solution() = }")
| 504
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazily-populated import structure: maps submodule name -> public names it
# exports. The mangled original assigned each optional export list to a fresh
# throwaway variable and never defined `_import_structure`, so the lazy-module
# line at the bottom raised NameError; restored to the standard transformers
# pattern.
_import_structure = {
    '''configuration_owlvit''': [
        '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''OwlViTConfig''',
        '''OwlViTOnnxConfig''',
        '''OwlViTTextConfig''',
        '''OwlViTVisionConfig''',
    ],
    '''processing_owlvit''': ['''OwlViTProcessor'''],
}

# Vision-only exports.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_owlvit'''] = ['''OwlViTFeatureExtractor''']
    _import_structure['''image_processing_owlvit'''] = ['''OwlViTImageProcessor''']

# Torch-only exports.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_owlvit'''] = [
        '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''OwlViTModel''',
        '''OwlViTPreTrainedModel''',
        '''OwlViTTextModel''',
        '''OwlViTVisionModel''',
        '''OwlViTForObjectDetection''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 445
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def __lowerCamelCase ( UpperCAmelCase_ : int = 8 ):
"""simple docstring"""
a :Optional[int] = ascii_letters + digits + punctuation
return "".join(secrets.choice(UpperCAmelCase_ ) for _ in range(UpperCAmelCase_ ) )
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int ):
"""simple docstring"""
i -= len(UpperCAmelCase_ )
a :Tuple = i // 3
a :int = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
a :Union[str, Any] = (
chars_incl
+ random(UpperCAmelCase_ , quotient + remainder )
+ random(UpperCAmelCase_ , UpperCAmelCase_ )
+ random(UpperCAmelCase_ , UpperCAmelCase_ )
)
a :Dict = list(UpperCAmelCase_ )
shuffle(UpperCAmelCase_ )
return "".join(UpperCAmelCase_ )
# random is a generalised function for letters, characters and numbers
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int ):
"""simple docstring"""
return "".join(secrets.choice(UpperCAmelCase_ ) for _ in range(UpperCAmelCase_ ) )
def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
pass # Put your code here...
def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str ):
"""simple docstring"""
pass # Put your code here...
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
pass # Put your code here...
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int = 8 ):
"""simple docstring"""
if len(UpperCAmelCase_ ) < min_length:
# Your Password must be at least 8 characters long
return False
a :Dict = any(char in ascii_uppercase for char in password )
a :Optional[int] = any(char in ascii_lowercase for char in password )
a :Tuple = any(char in digits for char in password )
a :Any = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def __lowerCamelCase ( ):
    """Interactive driver: prompt for a length and required characters, then
    print generated passwords.

    NOTE(review): `password_generator`, `alternative_password_generator` and
    `main` are not defined under those names in this mangled module (every
    helper is named ``__lowerCamelCase``), and both prompts assign to the same
    throwaway name ``a`` — confirm against the original script.
    """
    a :int = int(input('''Please indicate the max length of your password: ''' ).strip() )
    a :Union[str, Any] = input(
        '''Please indicate the characters that must be in your password: ''' ).strip()
    print('''Password generated:''' , password_generator(UpperCAmelCase_ ) )
    print(
        '''Alternative Password generated:''' , alternative_password_generator(UpperCAmelCase_ , UpperCAmelCase_ ) , )
    print('''[If you are thinking of using this passsword, You better save it.]''' )


if __name__ == "__main__":
    main()
| 445
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCamelCase_ = logging.getLogger(__name__)
def _UpperCAmelCase ( A , A ):
'''simple docstring'''
return (preds == labels).mean()
@dataclass
class snake_case_ :
    """Arguments for selecting the pretrained model/config/tokenizer.

    NOTE(review): mangled — every field shares the name ``__UpperCamelCase``
    (later assignments shadow earlier ones; presumably
    ``model_name_or_path`` / ``config_name`` / ``tokenizer_name`` /
    ``cache_dir``), and ``default=a`` references an undefined name ``a``
    (presumably ``default=None``). Confirm against the original script.
    """

    __UpperCamelCase = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    __UpperCamelCase = field(
        default=a, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    __UpperCamelCase = field(
        default=a, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    __UpperCamelCase = field(
        default=a, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
@dataclass
class snake_case_ :
    """Arguments describing the dataset and preprocessing for training/eval.

    NOTE(review): mangled — the fields share the name ``__UpperCamelCase``
    (presumably ``task_name`` / ``data_dir`` / ``max_seq_length`` /
    ``overwrite_cache``) and ``default=a`` references an undefined ``a``
    (presumably ``default=False``). This class also shadows the dataclass
    defined directly above, which shares its mangled name. Confirm against the
    original script.
    """

    __UpperCamelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    __UpperCamelCase = field(metadata={'help': 'Should contain the data files for the task.'} )
    __UpperCamelCase = field(
        default=1_2_8, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    __UpperCamelCase = field(
        default=a, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _UpperCAmelCase ( ):
    """Fine-tune and evaluate a multiple-choice model.

    Parses the (model, data, training) argument dataclasses, configures logging
    and seeding, loads config/tokenizer/model, builds train/eval datasets, runs
    `Trainer`, and writes evaluation metrics to ``eval_results.txt``.

    NOTE(review): this body is mangled — results of many assignments go to the
    throwaway name ``UpperCAmelCase__``, so names used afterwards (`parser`,
    `training_args`, `data_args`, `model_args`, `processor`, `label_list`,
    `config`, `tokenizer`, `model`, `train_dataset`, `eval_dataset`,
    `data_collator`, `trainer`, `results`, `result`, `output_eval_file`) and
    the stray ``A`` arguments are undefined as written. Confirm against the
    original run_multiple_choice script.
    """
    UpperCAmelCase__ =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =parser.parse_args_into_dataclasses()

    # Refuse to clobber an existing, non-empty output directory.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            " --overwrite_output_dir to overcome." )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s" , A )

    # Set seed
    set_seed(training_args.seed )

    # Resolve the task processor and its label set.
    try:
        UpperCAmelCase__ =processors[data_args.task_name]()
        UpperCAmelCase__ =processor.get_labels()
        UpperCAmelCase__ =len(A )
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name) )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase__ =AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    UpperCAmelCase__ =AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    UpperCAmelCase__ =AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )

    # Get datasets
    UpperCAmelCase__ =(
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=A , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    UpperCAmelCase__ =(
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=A , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(A ) -> Dict:
        # Accuracy over argmax predictions.
        UpperCAmelCase__ =np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(A , p.label_ids )}

    # Data collator
    UpperCAmelCase__ =DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None

    # Initialize our Trainer
    UpperCAmelCase__ =Trainer(
        model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    UpperCAmelCase__ ={}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )

        UpperCAmelCase__ =trainer.evaluate()

        UpperCAmelCase__ =os.path.join(training_args.output_dir , "eval_results.txt" )
        if trainer.is_world_master():
            with open(A , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key, value in result.items():
                    logger.info("  %s = %s" , A , A )
                    writer.write("%s = %s\n" % (key, value) )

                results.update(A )

    return results
def _UpperCAmelCase ( A ):
    """xla_spawn / multiprocessing entry point (the index argument is unused).

    NOTE(review): calls ``main()``, but the training driver above is itself
    named ``_UpperCAmelCase`` in this mangled module, so ``main`` is undefined
    here — confirm against the original script.
    """
    main()


if __name__ == "__main__":
    main()
| 714
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def _UpperCAmelCase ( A ):
'''simple docstring'''
UpperCAmelCase__ =torch.load(A , map_location="cpu" )
if "model" in sd.keys():
UpperCAmelCase__ =torch.load(A , map_location="cpu" )["model"]
# pop unnecessary weights
UpperCAmelCase__ =[
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(A )
UpperCAmelCase__ ={
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCAmelCase__ =sd.pop(A )
UpperCAmelCase__ =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCAmelCase__ =sd[key]
# We split QKV in separate Q,K,V
UpperCAmelCase__ =key.replace(".qkv_proj." , ".q_proj." )
UpperCAmelCase__ =key.replace(".qkv_proj." , ".k_proj." )
UpperCAmelCase__ =key.replace(".qkv_proj." , ".v_proj." )
UpperCAmelCase__ =value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =torch.split(A , depth // 3 , dim=0 )
UpperCAmelCase__ =q
UpperCAmelCase__ =k
UpperCAmelCase__ =v
del sd[key]
return sd
@torch.no_grad()
def _UpperCAmelCase ( checkpoint_path , pytorch_dump_folder_path , config=None ):
    """Convert a metaseq OPT checkpoint into a saved HF ``OPTModel``.

    Args:
        checkpoint_path: path to the fairseq/metaseq checkpoint.
        pytorch_dump_folder_path: output directory (created if missing).
        config: optional HF config name/path; defaults to a fresh ``OPTConfig``.

    Fixes vs. the mangled original: the signature declared ``( A , A , A=None )``
    — duplicate parameter names are a SyntaxError — and ``mkdir(exist_ok=A)``
    passed the path instead of True.
    NOTE(review): ``load_checkpoint`` is called by its original name, as in the
    source body; in this mangled module the loader above is itself named
    ``_UpperCAmelCase`` — confirm name resolution in the full script.
    """
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()

    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )

    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )

    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI wrapper for the conversion function.
    # NOTE(review): mangled — the parser and parsed args are both assigned to
    # the throwaway name ``UpperCamelCase_``, so ``parser``/``args`` below are
    # undefined, and ``convert_opt_checkpoint`` is not defined under that name
    # in this module. Confirm against the original script.
    UpperCamelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    UpperCamelCase_ = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 510
| 0
|
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__lowercase : str = TypeVar('T')
class __UpperCamelCase ( Generic[T] ):
    """LRU cache by reference: a deque ordered most-recent-first plus a set for
    O(1) membership tests.

    NOTE(review): this class is machine-mangled — the three class attributes
    below all share the name ``A_``; ``__init__`` assigns to throwaway locals
    instead of ``self.dq_store`` / ``self.key_reference`` / ``self._MAX_CAPACITY``;
    the methods reference ``x`` and the hard-coded class name ``LRUCache``,
    neither of which exists here. Confirm against the original before use.
    """

    A_ = 42 # Cache store of keys
    A_ = 42 # References of the keys in cache
    A_ = 10 # Maximum capacity of cache

    def __init__( self , __a ):
        # Capacity semantics (intended): 0/None -> unbounded (sys.maxsize),
        # negative -> ValueError, otherwise the given n.
        __a : Optional[Any] = deque()
        __a : int = set()
        if not n:
            __a : Any = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.' )
        else:
            __a : Tuple = n

    def __UpperCAmelCase ( self , __a ):
        # refer(x): move x to the front; on a miss at capacity, evict the LRU
        # (rightmost) element first.
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                __a : Dict = self.dq_store.pop()
                self.key_reference.remove(__a )
        else:
            self.dq_store.remove(__a )

        self.dq_store.appendleft(__a )
        self.key_reference.add(__a )

    def __UpperCAmelCase ( self ):
        # display(): print cache contents, most recent first.
        # NOTE(review): shares its mangled name with refer() above, so at class
        # creation this definition shadows the previous one.
        for k in self.dq_store:
            print(__a )

    def __repr__( self ):
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Demo / smoke test of the cache.
    # NOTE(review): mangled — `LRUCache` is undefined (the class above is named
    # ``__UpperCamelCase``), and the instance is assigned to ``__lowercase``
    # while the calls below use ``lru_cache``. Confirm against the original.
    __lowercase : LRUCache[str | int] = LRUCache(4)
    lru_cache.refer('A')
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer('A')
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 476
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __UpperCamelCase ( unittest.TestCase ):
    """Fast (CPU) smoke test for the unconditional Latent Diffusion pipeline.

    NOTE(review): local assignments are mangled to ``__a``/throwaways, so later
    references such as ``model``, ``ldm``, ``image`` etc. are undefined as
    written — confirm against the original diffusers test module.
    """

    @property
    def __UpperCAmelCase ( self ):
        # Tiny UNet backbone; fixed seed makes the random init deterministic.
        torch.manual_seed(0 )
        __a : Tuple = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    @property
    def __UpperCAmelCase ( self ):
        # Tiny VQ autoencoder used as the pipeline's `vqvae`.
        torch.manual_seed(0 )
        __a : Union[str, Any] = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model

    @property
    def __UpperCAmelCase ( self ):
        # Tiny CLIP text encoder (unused by the unconditional pipeline but kept
        # for parity with other pipeline tests).
        torch.manual_seed(0 )
        __a : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(__a )

    def __UpperCAmelCase ( self ):
        # Run two inference steps and compare a corner slice of the output (and
        # its tuple-return variant) against pinned values.
        __a : Tuple = self.dummy_uncond_unet
        __a : Any = DDIMScheduler()
        __a : Optional[int] = self.dummy_vq_model

        __a : List[str] = LDMPipeline(unet=__a , vqvae=__a , scheduler=__a )
        ldm.to(__a )
        ldm.set_progress_bar_config(disable=__a )

        __a : Dict = torch.manual_seed(0 )
        __a : Dict = ldm(generator=__a , num_inference_steps=2 , output_type='numpy' ).images

        __a : str = torch.manual_seed(0 )
        __a : List[Any] = ldm(generator=__a , num_inference_steps=2 , output_type='numpy' , return_dict=__a )[0]

        __a : Optional[Any] = image[0, -3:, -3:, -1]
        __a : Any = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        __a : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        # Looser tolerance on Apple MPS, whose numerics differ slightly.
        __a : Optional[Any] = 1E-2 if torch_device != 'mps' else 3E-2

        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    """Slow integration check against the pretrained CompVis/ldm-celebahq-256.

    Fixes: all locals were ``__a`` so ``ldm``, ``image`` and ``image_slice``
    were undefined names.
    """

    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 476
| 1
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    """Builds tiny GPTNeoXJapanese configs/inputs and runs shape/consistency checks.

    Fixes: ``__init__`` stored every argument into a throwaway local
    (``lowercase__``) instead of ``self.*``; bodies referenced the undefined
    ``A__``; and every method shared one name although the test class calls
    ``prepare_config_and_inputs``, ``create_and_check_model``, etc.
    The class is named as instantiated in ``setUp`` below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/mask/labels plus a tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # decoder-mode variants flip this below
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        # NOTE(review): the obfuscated source only shows ``<name> = True`` here;
        # restoring the conventional decoder-mode toggle — confirm against upstream.
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past['hidden_states'][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )['hidden_states'][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1E-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model/pipeline test suite for GPTNeoXJapanese.

    Fixes: the base classes were the undefined ``_lowerCamelCase`` (the file
    imports ModelTesterMixin and PipelineTesterMixin for this), method bodies
    used the undefined ``A__``, and the tester class name did not match its
    definition above.
    """

    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # The decoder path must also work without an explicit attention mask.
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = 'abeja/gpt-neox-japanese-2.7b'
        prompts = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
        EXPECTED_OUTPUTS = [
            'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。',
            '100年後に必要とされる会社は、「人」が中心の会社です。',
            'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。',
            '国境の長いトンネルを抜けると、そこは雪国だった。',
            '美味しい日本食といえば、やっぱりお寿司ですよね。',
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors='pt').input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 700
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a Roberta tokenizer into one processor.

    Fixes: the base class was the undefined ``UpperCAmelCase`` (the file imports
    ``ProcessorMixin``); ``__init__`` and ``__call__`` declared duplicate
    parameter names (SyntaxError); the text+audio branch dropped the audio
    features instead of merging them into the encoding; and the audio-only
    branch returned an empty payload instead of the audio features.
    """

    feature_extractor_class = 'ClapFeatureExtractor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or extract features from ``audios``.

        Returns a BatchEncoding with ``input_ids``/``attention_mask`` for text
        and ``input_features`` for audio; at least one input is required.
        """
        sampling_rate = kwargs.pop('sampling_rate', None)

        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)

        if text is not None and audios is not None:
            # Merge the audio features into the text encoding.
            encoding['input_features'] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both sub-processors' input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 45
| 0
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    """Common-model tests for FlaxAutoencoderKL with a tiny configuration.

    Fixes: the mixin base was the undefined ``a__`` (the file imports
    ``FlaxModelTesterMixin``), ``jax.random.uniform`` received the undefined
    ``lowerCAmelCase__`` instead of the PRNG key, and both methods shared one
    name.
    """

    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        # NOTE(review): method name follows the FlaxModelTesterMixin
        # convention — confirm against the mixin's expected hook name.
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 105
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the OwlViT sub-package.
# Fixes: the dict was assigned to ``a`` while ``_LazyModule`` received the
# undefined ``_import_structure``; the optional-dependency branches
# overwrote ``a`` instead of adding entries to the dict; and the
# ``_LazyModule`` instance was never installed into ``sys.modules``.
_import_structure = {
    '''configuration_owlvit''': [
        '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''OwlViTConfig''',
        '''OwlViTOnnxConfig''',
        '''OwlViTTextConfig''',
        '''OwlViTVisionConfig''',
    ],
    '''processing_owlvit''': ['''OwlViTProcessor'''],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_owlvit'''] = ['''OwlViTFeatureExtractor''']
    _import_structure['''image_processing_owlvit'''] = ['''OwlViTImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_owlvit'''] = [
        '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''OwlViTModel''',
        '''OwlViTPreTrainedModel''',
        '''OwlViTTextModel''',
        '''OwlViTVisionModel''',
        '''OwlViTForObjectDetection''',
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 633
| 0
|
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class Node:
    """A binary-search-tree node with a parent pointer.

    Fixes: the constructor's parameter was named ``_lowercase`` while the body
    stored the undefined name ``value``; the class is named ``Node`` because
    the tree's insert method constructs ``Node(value)``.
    """

    def __init__(self, value=None):
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None

    def __repr__(self):
        """Leaf nodes print as their value; inner nodes as a nested dict."""
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f'''{self.value}''': (self.left, self.right)}, indent=1)
class BinarySearchTree:
    """Unbalanced binary search tree; duplicates descend to the right.

    Fixes: duplicate parameter names (SyntaxError), undefined ``snake_case_``
    references, and all methods sharing one obfuscated name although the demo
    below calls ``insert``/``search``/``remove``/``get_max``/``get_min``/
    ``empty``; the class is named as instantiated there. Also replaces the
    identity comparison ``is not`` in search with value equality.
    """

    def __init__(self, root=None):
        self.root = root

    def __str__(self):
        return str(self.root)

    def __reassign_nodes(self, node, new_children):
        """Splice ``new_children`` into ``node``'s place (helper for remove)."""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node):
        """Return True if ``node`` is its parent's right child."""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        """Return True when the tree has no nodes."""
        return self.root is None

    def __insert(self, value):
        """Insert one value, walking from the root to an open leaf slot."""
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values):
        """Insert each of ``values`` in order."""
        for value in values:
            self.__insert(value)

    def search(self, value):
        """Return the node holding ``value`` or None; IndexError on empty tree."""
        if self.empty():
            raise IndexError("""Warning: Tree is empty! please use another.""")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value != value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None):
        """Return the right-most (largest) node under ``node`` (default root)."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None):
        """Return the left-most (smallest) node in the tree."""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value):
        """Delete the node holding ``value``; two-child case swaps with the
        maximum of the left subtree to keep BST order."""
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)
                node.value = (
                    tmp_node.value
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node):
        """Yield nodes in preorder (node, left subtree, right subtree)."""
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        """Traverse with ``traversal_function`` (default: preorder)."""
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr, node):
        """Append node values to ``arr`` in sorted (inorder) order."""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k, node):
        """Return the k-th smallest value (1-based) in the subtree at ``node``."""
        arr = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node):
    """Return the nodes of the subtree at ``curr_node`` in postorder.

    Fixes: the function recursed as ``postorder`` but was defined under a
    different name, and the parameter name did not match its uses.
    """
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def binary_search_tree():
    """Demo: build a tree, search it, report min/max, then empty it.

    Fixes: the body referenced the undefined ``_lowerCamelCase`` where the
    loop variable ``i`` and the tree ``t`` were intended.
    """
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)
        print("Min Value: ", t.get_min().value)

    for i in testlist:
        t.remove(i)
    print(t)
if __name__ == "__main__":
    import doctest

    # Run this module's doctests verbosely when executed as a script.
    doctest.testmod(verbose=True)
| 720
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    """Tests for the text-classification pipeline (PT, TF, fast and slow).

    Fixes: every local was the name-mangled ``__a`` so references such as
    ``text_classifier`` and ``outputs`` were undefined; all methods shared one
    name (only the last survived); ``ANY(...)``/``assertRaises(...)`` received
    the undefined ``_lowercase``; and ``model.config.idalabel`` should be
    ``id2label``.
    """

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="""text-classification""", model="""hf-internal-testing/tiny-random-distilbert""", framework="""pt""")
        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])

        outputs = text_classifier("""This is great !""", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}])

        outputs = text_classifier(["""This is great !""", """This is bad"""], top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ], )

        outputs = text_classifier("""This is great !""", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])

        # Legacy behavior
        outputs = text_classifier("""This is great !""", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])

        outputs = text_classifier("""This is great !""", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]])

        outputs = text_classifier(["""This is great !""", """Something else"""], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ], )

        outputs = text_classifier(["""This is great !""", """Something else"""], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs), [
                {"""label""": """LABEL_0""", """score""": 0.504},
                {"""label""": """LABEL_0""", """score""": 0.504},
            ], )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="""text-classification""", model="""hf-internal-testing/tiny-random-distilbert""", framework="""pt""", device=torch.device("""cpu"""), )

        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="""text-classification""", model="""hf-internal-testing/tiny-random-distilbert""", framework="""tf""")
        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("""text-classification""")

        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """POSITIVE""", """score""": 1.0}])
        outputs = text_classifier("""This is bad !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """NEGATIVE""", """score""": 1.0}])
        outputs = text_classifier("""Birds are a type of animal""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """POSITIVE""", """score""": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("""text-classification""", framework="""tf""")

        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """POSITIVE""", """score""": 1.0}])
        outputs = text_classifier("""This is bad !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """NEGATIVE""", """score""": 1.0}])
        outputs = text_classifier("""Birds are a type of animal""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """POSITIVE""", """score""": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = """HuggingFace is in"""
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"""label""": ANY(str), """score""": ANY(float)}])
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values())

        valid_inputs = ["""HuggingFace is in """, """Paris is in France"""]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs), [{"""label""": ANY(str), """score""": ANY(float)}, {"""label""": ANY(str), """score""": ANY(float)}], )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values())
        self.assertTrue(outputs[1]["""label"""] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs), [[{"""label""": ANY(str), """score""": ANY(float)}] * N, [{"""label""": ANY(str), """score""": ANY(float)}] * N], )

        valid_inputs = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs), {"""label""": ANY(str), """score""": ANY(float)}, )
        self.assertTrue(outputs["""label"""] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["""HuggingFace is in """, """Paris is in France"""]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]])
        self.assertEqual(
            nested_simplify(outputs), [{"""label""": ANY(str), """score""": ANY(float)}], )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values())
| 63
| 0
|
from collections import namedtuple
import requests
from lxml import html # type: ignore
# Fixes: the namedtuple and the format string were assigned to ``a_`` while
# the code consumes them as ``covid_data`` and ``fmt``, and the scraper was
# defined under a different name than its call site ``covid_stats()``.
covid_data = namedtuple('covid_data', 'cases deaths recovered')


def covid_stats(url='https://www.worldometers.info/coronavirus/'):
    """Scrape worldometers and return (cases, deaths, recovered) as strings."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 73
|
from __future__ import annotations

# Fixes: numpy exposes no ``floataa`` — the intended dtype name is ``float64``.
from numpy import array, cos, cross, floataa as _unused_broken_name, radians, sin  # noqa: F401 (placeholder, see below)
def polar_force(magnitude, angle, radian_mode=False):
    """Return the force vector ``[x, y]`` given magnitude and angle.

    Fixes: the original def declared three parameters all named
    ``_UpperCAmelCase`` (a SyntaxError) and its name did not match the
    ``polar_force(...)`` call sites in the assertions below.

    angle is in degrees unless ``radian_mode`` is True.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces, location, eps=10**-1):
    """Return True when the net moment of ``forces`` applied at ``location``
    is (approximately) zero, i.e. the system is in rotational equilibrium.

    Fixes: the def shadowed ``polar_force`` (both shared one obfuscated name)
    and declared duplicate parameter names; it is called as
    ``in_static_equilibrium`` in the assertions below.
    """
    # Per-row 2D cross product gives each force's moment about the origin.
    moments = cross(forces, location)
    sum_moments = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Fixes: every array was assigned to ``a_`` while the assertions consume
    # ``forces`` and ``location`` (NameError).
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 1_80 - 30),
            polar_force(879.54, 45),
            polar_force(1_00, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(2_15, 1_80 - 45),
            polar_force(2_64, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 73
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Fixes: five distinct module constants were all assigned to ``__lowercase``
# (each overwriting the last), while the tokenizer class body references
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES by name.
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 5_1_2,
}

# NOTE(review): standard sentencepiece word-boundary marker name — confirm
# this constant is the one consumed by the (truncated) tokenizer methods.
SPIECE_UNDERLINE = '▁'
class a__( lowerCAmelCase__ ):
    """CamemBERT-style SentencePiece tokenizer with fairseq-compatible ids.

    NOTE(review): this copy is heavily mangled — method bodies read
    ``_lowerCAmelCase`` (single leading underscore, undefined) while the
    parameters are named ``__lowerCAmelCase`` (double underscore, which is
    name-mangled inside a class body), several signatures repeat one
    parameter name (a SyntaxError), results are bound to the throwaway name
    ``lowerCAmelCase``, and all public helpers share the name ``a_`` so each
    definition shadows the previous one.  Confirm every body against the
    upstream ``CamembertTokenizer``.
    """

    # Class-level tokenizer metadata (vocab filenames, pretrained maps,
    # model input names).
    UpperCAmelCase_ : Any = VOCAB_FILES_NAMES
    UpperCAmelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase_ : Optional[Any] = ['''input_ids''', '''attention_mask''']

    def __init__( self , __lowerCAmelCase , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , __lowerCAmelCase = None , **__lowerCAmelCase , ):
        """Load the SentencePiece model and set up fairseq-style special ids."""
        # Mask token behaves like a normal word, i.e. keeps the space before it.
        lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase) if isinstance(_lowerCAmelCase , _lowerCAmelCase) else mask_token
        lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
        lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(_lowerCAmelCase))
        lowerCAmelCase = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        lowerCAmelCase = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
        lowerCAmelCase = len(self.fairseq_tokens_to_ids)
        lowerCAmelCase = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None):
        """Add special tokens: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCAmelCase = [self.cls_token_id]
        lowerCAmelCase = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            # Token list already contains special tokens; defer to the base class.
            return super().get_special_tokens_mask(
                token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase)
        if token_ids_a is None:
            return [1] + ([0] * len(_lowerCAmelCase)) + [1]
        return [1] + ([0] * len(_lowerCAmelCase)) + [1, 1] + ([0] * len(_lowerCAmelCase)) + [1]

    def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None):
        """Return token-type ids (all zeros — CamemBERT does not use them)."""
        lowerCAmelCase = [self.sep_token_id]
        lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    @property
    def a_ ( self):
        """Vocabulary size: SentencePiece pieces plus the fairseq extras."""
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def a_ ( self):
        """Return the token -> id vocabulary (including added tokens)."""
        lowerCAmelCase = {self.convert_ids_to_tokens(_lowerCAmelCase): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def a_ ( self , __lowerCAmelCase):
        """Tokenize text into SentencePiece sub-word strings."""
        return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase)

    def a_ ( self , __lowerCAmelCase):
        """Convert a token string to an id, honouring the fairseq extras."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(_lowerCAmelCase) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase)

    def a_ ( self , __lowerCAmelCase):
        """Convert an id back to its token string."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def a_ ( self , __lowerCAmelCase):
        """Join tokens back into a single string, decoding sub-word runs."""
        lowerCAmelCase = []
        lowerCAmelCase = """"""
        lowerCAmelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_lowerCAmelCase) + token
                lowerCAmelCase = True
                lowerCAmelCase = []
            else:
                current_sub_tokens.append(_lowerCAmelCase)
                lowerCAmelCase = False
        out_string += self.sp_model.decode(_lowerCAmelCase)
        return out_string.strip()

    def __getstate__( self):
        """Drop the unpicklable SentencePiece processor before pickling."""
        lowerCAmelCase = self.__dict__.copy()
        lowerCAmelCase = None
        return state

    def __setstate__( self , __lowerCAmelCase):
        """Restore state and reload the SentencePiece model from disk."""
        lowerCAmelCase = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs"""):
            lowerCAmelCase = {}
        lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None):
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(_lowerCAmelCase):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        lowerCAmelCase = os.path.join(
            _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(_lowerCAmelCase) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , _lowerCAmelCase)
        elif not os.path.isfile(self.vocab_file):
            # No on-disk model file; write out the serialized proto instead.
            with open(_lowerCAmelCase , """wb""") as fi:
                lowerCAmelCase = self.sp_model.serialized_model_proto()
                fi.write(_lowerCAmelCase)
        return (out_vocab_file,)
| 709
|
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
__lowercase = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
__lowercase = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
__lowercase = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def snake_case__ (pred_label, label, num_labels, ignore_index, label_map = None, reduce_labels = False):
    """Compute per-class intersection/union areas for one prediction/GT pair.

    Args:
        pred_label: predicted segmentation map (array-like of class ids).
        label: ground-truth segmentation map of the same shape.
        num_labels: number of classes (histogram bins).
        ignore_index: ground-truth id excluded from every statistic.
        label_map: optional mapping of old label ids to new label ids,
            applied in place to ``label``.
        reduce_labels: if True, shift all labels down by one and send the old
            background id 0 to 255 (the usual ADE20k convention).

    Returns:
        Tuple of four ``ndarray``s of shape ``(num_labels,)``:
        ``(area_intersect, area_union, area_pred_label, area_label)``.

    Note: the original signature reused one parameter name six times (a
    SyntaxError) and bound every intermediate to a single throwaway name;
    this restores the intended computation.
    """
    if label_map is not None:
        for old_id, new_id in label_map.items():
            # Remap ground-truth ids in place.
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    # Drop every pixel whose ground-truth class is the ignore index.
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    # Pixels where prediction and ground truth agree.
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def snake_case__ (results, gt_seg_maps, num_labels, ignore_index, label_map = None, reduce_labels = False):
    """Sum per-class intersection/union statistics over a batch of maps.

    Args:
        results: iterable of predicted segmentation maps.
        gt_seg_maps: iterable of ground-truth maps, parallel to ``results``.
        num_labels: number of classes.
        ignore_index: ground-truth id excluded from the statistics.
        label_map: optional old-id -> new-id remapping for the ground truth.
        reduce_labels: whether to shift labels down by one (ADE20k style).

    Returns:
        Tuple of four float64 arrays of shape ``(num_labels,)``:
        ``(total_area_intersect, total_area_union, total_area_pred_label,
        total_area_label)``.
    """
    # Accumulators (the original used the undefined ``np.floataa``; float64
    # matches the upstream implementation).
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        # NOTE(review): `intersect_and_union` matches the upstream helper
        # name; in this file the helper above is bound to a different name —
        # verify the wiring.
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def snake_case__ (results, gt_seg_maps, num_labels, ignore_index, nan_to_num = None, label_map = None, reduce_labels = False):
    """Compute mean-IoU metrics for a batch of semantic segmentation maps.

    Args:
        results: iterable of predicted segmentation maps.
        gt_seg_maps: iterable of ground-truth maps, parallel to ``results``.
        num_labels: number of classes.
        ignore_index: ground-truth id excluded from the statistics.
        nan_to_num: if given, NaN metric values are replaced by this number.
        label_map: optional old-id -> new-id remapping for the ground truth.
        reduce_labels: whether to shift labels down by one (ADE20k style).

    Returns:
        Dict with ``mean_iou``, ``mean_accuracy``, ``overall_accuracy``,
        ``per_category_iou`` and ``per_category_accuracy``.
    """
    # NOTE(review): `total_intersect_and_union` matches the upstream helper
    # name; in this file the helper above is bound to a different name —
    # verify the wiring.
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        # NaNs arise for classes absent from both prediction and ground truth.
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__( datasets.Metric ):
    """`datasets` Metric wrapper computing mean IoU for semantic segmentation.

    NOTE(review): `_DESCRIPTION` / `_KWARGS_DESCRIPTION` / `_CITATION` and the
    `mean_iou` callable used below are not defined under those names in this
    file (the module binds everything to mangled names), both methods share
    the name ``a_`` (the second shadows the first), the `_compute` signature
    repeats one parameter name (a SyntaxError), and `iou_result` is never
    bound.  Confirm against the upstream `mean_iou` metric module.
    """

    def a_ ( self):
        """Declare the metric's input features and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    """predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16"""))),
                    """references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16"""))),
                }) , reference_urls=[
                """https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
            ] , )

    def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , ):
        """Delegate metric computation to the module-level mean-IoU helper."""
        lowerCAmelCase = mean_iou(
            results=__lowerCAmelCase , gt_seg_maps=__lowerCAmelCase , num_labels=__lowerCAmelCase , ignore_index=__lowerCAmelCase , nan_to_num=__lowerCAmelCase , label_map=__lowerCAmelCase , reduce_labels=__lowerCAmelCase , )
        return iou_result
| 605
| 0
|
"""simple docstring"""
from __future__ import annotations
def UpperCamelCase ( matrix: list[list[int]] ) -> int:
    """Return the minimum path cost from the top-left to the bottom-right cell.

    Movement is restricted to right/down steps.  The DP runs in place, so the
    input ``matrix`` is mutated into cumulative path costs.

    Args:
        matrix: non-empty rectangular grid of step costs.

    Returns:
        The minimum total cost ``matrix[-1][-1]``.

    Note: the original parameter was named ``_A`` while the body read the
    undefined name ``matrix`` (a NameError); the names now agree.
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
    import doctest

    # Run the doctests embedded in this module when executed as a script.
    doctest.testmod()
| 264
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps submodule name -> public names it provides.
_lowerCAmelCase = {
    'configuration_mobilebert': [
        'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'MobileBertConfig',
        'MobileBertOnnxConfig',
    ],
    'tokenization_mobilebert': ['MobileBertTokenizer'],
}

# NOTE(review): every structure below rebinds the same name `_lowerCAmelCase`,
# shadowing the previous value; upstream extends a single `_import_structure`
# dict, which the `_LazyModule` call at the bottom reads but which is never
# defined here.  Confirm against the upstream mobilebert `__init__.py`.
try:
    # Fast (tokenizers-backed) tokenizer is optional.
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = ['MobileBertTokenizerFast']

try:
    # PyTorch models are optional.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = [
        'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MobileBertForMaskedLM',
        'MobileBertForMultipleChoice',
        'MobileBertForNextSentencePrediction',
        'MobileBertForPreTraining',
        'MobileBertForQuestionAnswering',
        'MobileBertForSequenceClassification',
        'MobileBertForTokenClassification',
        'MobileBertLayer',
        'MobileBertModel',
        'MobileBertPreTrainedModel',
        'load_tf_weights_in_mobilebert',
    ]

try:
    # TensorFlow models are optional.
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = [
        'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFMobileBertForMaskedLM',
        'TFMobileBertForMultipleChoice',
        'TFMobileBertForNextSentencePrediction',
        'TFMobileBertForPreTraining',
        'TFMobileBertForQuestionAnswering',
        'TFMobileBertForSequenceClassification',
        'TFMobileBertForTokenClassification',
        'TFMobileBertMainLayer',
        'TFMobileBertModel',
        'TFMobileBertPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real (eager) imports.
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): upstream registers the lazy module via
    # `sys.modules[__name__] = _LazyModule(...)`; here the result is only
    # bound to a local name — verify.
    _lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 264
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps submodule name -> public names it provides.
_lowerCAmelCase = {
    """configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
    """tokenization_convbert""": ["""ConvBertTokenizer"""],
}

# NOTE(review): each structure below rebinds the same name `_lowerCAmelCase`,
# shadowing the previous value; upstream extends a single `_import_structure`
# dict, which the `_LazyModule` call at the bottom reads but which is never
# defined here.  Confirm against the upstream convbert `__init__.py`.
try:
    # Fast (tokenizers-backed) tokenizer is optional.
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = ["""ConvBertTokenizerFast"""]

try:
    # PyTorch models are optional.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = [
        """CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ConvBertForMaskedLM""",
        """ConvBertForMultipleChoice""",
        """ConvBertForQuestionAnswering""",
        """ConvBertForSequenceClassification""",
        """ConvBertForTokenClassification""",
        """ConvBertLayer""",
        """ConvBertModel""",
        """ConvBertPreTrainedModel""",
        """load_tf_weights_in_convbert""",
    ]

try:
    # TensorFlow models are optional.
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = [
        """TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFConvBertForMaskedLM""",
        """TFConvBertForMultipleChoice""",
        """TFConvBertForQuestionAnswering""",
        """TFConvBertForSequenceClassification""",
        """TFConvBertForTokenClassification""",
        """TFConvBertLayer""",
        """TFConvBertModel""",
        """TFConvBertPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real (eager) imports.
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): upstream registers the lazy module via
    # `sys.modules[__name__] = _LazyModule(...)`; here the result is only
    # bound to a local name — verify.
    _lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 306
|
# Parameter-name sets used to validate diffusion-pipeline call signatures.
# NOTE(review): every set below is bound to the same name `_lowerCAmelCase`,
# so each assignment shadows the previous one — upstream gives each frozenset
# its own TEXT_TO_IMAGE_* / IMAGE_VARIATION_* style constant name; verify.
_lowerCAmelCase = frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_lowerCAmelCase = frozenset(["""prompt""", """negative_prompt"""])
_lowerCAmelCase = frozenset([])
_lowerCAmelCase = frozenset(["""image"""])
_lowerCAmelCase = frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_lowerCAmelCase = frozenset(["""image"""])
_lowerCAmelCase = frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_lowerCAmelCase = frozenset(["""prompt""", """image""", """negative_prompt"""])
_lowerCAmelCase = frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_lowerCAmelCase = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_lowerCAmelCase = frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_lowerCAmelCase = frozenset(["""image""", """mask_image"""])
_lowerCAmelCase = frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_lowerCAmelCase = frozenset(["""example_image""", """image""", """mask_image"""])
_lowerCAmelCase = frozenset(["""class_labels"""])
_lowerCAmelCase = frozenset(["""class_labels"""])
_lowerCAmelCase = frozenset(["""batch_size"""])
_lowerCAmelCase = frozenset([])
_lowerCAmelCase = frozenset(["""batch_size"""])
_lowerCAmelCase = frozenset([])
# Audio-generation pipeline parameters.
_lowerCAmelCase = frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_lowerCAmelCase = frozenset(["""prompt""", """negative_prompt"""])
_lowerCAmelCase = frozenset(["""input_tokens"""])
_lowerCAmelCase = frozenset(["""input_tokens"""])
| 306
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)

# Map of released X-MOD checkpoints to their hosted config files.
# NOTE(review): this dict rebinds the same name `A_` as the logger above,
# shadowing it — upstream uses a distinct XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP
# name; verify.
A_ = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class _snake_case ( _a ):
    """Configuration for the X-MOD model (an XLM-R variant with per-language
    adapter modules).

    Note: the original ``__init__`` reused one mangled name for every
    parameter (a SyntaxError) and bound each value to a throwaway local
    instead of ``self``; distinct parameter names matching the attribute
    reads restore the intended behaviour.
    """

    _A : str = "xmod"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        """Store the transformer hyper-parameters and adapter settings."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # Adapter-specific settings (X-MOD's language-adapter machinery).
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        # Copy so callers can pass any iterable of language codes.
        self.languages = list(languages)
        self.default_language = default_language
class _snake_case ( _a ):
    """ONNX export configuration for X-MOD.

    Note: the original property bound its axis dict to a throwaway name and
    then read the undefined name ``dynamic_axis``; the names now agree.
    """

    @property
    def __UpperCamelCase ( self ):
        """Return the dynamic-axes mapping for the model's input tensors."""
        # Multiple-choice inputs carry an extra `choice` dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 143
|
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class _snake_case :
    """Sample this process's RSS from a background thread to record the peak.

    NOTE(review): the three monitoring methods below all share the single
    name ``__UpperCamelCase`` — each definition shadows the previous one, so
    only the last survives on the class — and the annotated
    ``SCREAMING_SNAKE_CASE`` assignments bind throwaway locals rather than
    the ``self.process`` / ``self.peak_monitoring`` / ``self.thread`` /
    ``self.cpu_memory_peak`` attributes the bodies read.  Confirm against
    the upstream ``PeakCPUMemory`` helper.
    """

    def __init__( self : Any ):
        # Presumably meant to initialise self.process / self.peak_monitoring.
        SCREAMING_SNAKE_CASE:List[Any] = psutil.Process()
        SCREAMING_SNAKE_CASE:Dict = False

    def __UpperCamelCase ( self : Tuple ):
        # Busy-loop: repeatedly sample RSS until monitoring is switched off.
        SCREAMING_SNAKE_CASE:Any = -1
        while True:
            SCREAMING_SNAKE_CASE:Dict = max(self.process.memory_info().rss ,self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def __UpperCamelCase ( self : str ):
        # Start the sampler thread (intended to run as a daemon).
        SCREAMING_SNAKE_CASE:List[Any] = True
        SCREAMING_SNAKE_CASE:str = threading.Thread(target=self.peak_monitor )
        SCREAMING_SNAKE_CASE:Tuple = True
        self.thread.start()

    def __UpperCamelCase ( self : List[str] ):
        # Stop monitoring, wait for the sampler thread, return the peak RSS.
        SCREAMING_SNAKE_CASE:Union[str, Any] = False
        self.thread.join()
        return self.cpu_memory_peak
# NOTE(review): `PeakCPUMemory` is not a name this file defines (the tracker
# class above is named `_snake_case`), and this `A_` binding is immediately
# shadowed by the function definition below — confirm against the upstream
# module, which keeps a module-level `cpu_peak_tracker` instance.
A_ = PeakCPUMemory()


def A_ ( ):
    """Snapshot wall-clock time plus CPU/GPU memory before a benchmarked run.

    NOTE(review): every statistic is bound to a throwaway
    ``SCREAMING_SNAKE_CASE`` local, ``torch.cuda.memory_allocated`` is called
    with the undefined name ``snake_case``, and the returned ``measures`` /
    the ``cpu_peak_tracker`` used below are never defined in this file —
    upstream builds and returns a ``measures`` dict keyed by "time", "cpu"
    and per-GPU indices.  Verify against the upstream benchmarking utils.
    """
    # Time
    SCREAMING_SNAKE_CASE:int = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    SCREAMING_SNAKE_CASE:int = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        SCREAMING_SNAKE_CASE:Tuple = torch.cuda.memory_allocated(snake_case )
        torch.cuda.reset_peak_memory_stats()
    return measures
def A_ ( snake_case ):
    """Compute elapsed time and CPU/GPU memory deltas (MiB) since the start
    snapshot.

    NOTE(review): the parameter is named ``snake_case`` but the body reads
    the undefined name ``start_measures``; each delta is bound to a
    throwaway ``SCREAMING_SNAKE_CASE`` local; and the final ``return
    measures`` references a name never defined here.  Upstream returns a
    dict of deltas.  Verify against the upstream benchmarking utils.
    """
    # Time
    SCREAMING_SNAKE_CASE:Optional[Any] = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (bytes -> MiB via / 2**20)
    SCREAMING_SNAKE_CASE:Any = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    SCREAMING_SNAKE_CASE:Dict = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        SCREAMING_SNAKE_CASE:List[str] = (torch.cuda.memory_allocated(snake_case ) - start_measures[str(snake_case )]) / 2**20
        SCREAMING_SNAKE_CASE:Union[str, Any] = (torch.cuda.max_memory_allocated(snake_case ) - start_measures[str(snake_case )]) / 2**20
    return measures
def A_ (measures, description):
    """Pretty-print the timing and memory statistics for one benchmarked run.

    Args:
        measures: dict with at least "time", "cpu" and "cpu-peak" keys, plus
            per-GPU entries ("<i>" and "<i>-peak") when CUDA devices exist.
        description: label printed as the heading.

    Note: the original signature reused the name ``snake_case`` for both
    parameters (a SyntaxError) while the body read ``measures`` and
    ``description``; the names now agree.
    """
    print(f'''{description}:''')
    print(f'''- Time: {measures["time"]:.2f}s''')
    for i in range(torch.cuda.device_count()):
        print(f'''- GPU {i} allocated: {measures[str(i)]:.2f}MiB''')
        peak = measures[f'''{i}-peak''']
        print(f'''- GPU {i} peak: {peak:.2f}MiB''')
    print(f'''- CPU RAM allocated: {measures["cpu"]:.2f}MiB''')
    print(f'''- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB''')
| 143
| 1
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """Adapter exposing a `timm` model as a Transformers backbone.

    NOTE(review): this copy is heavily mangled — the class lists its own
    (not-yet-bound) name as both base classes (upstream derives from
    ``PreTrainedModel`` and ``BackboneMixin``); results are bound to the
    throwaway name ``A`` while later lines read the intended names (e.g.
    ``pretrained``, ``use_timm``, ``return_dict``); the three class
    attributes and the three ``_lowerCAmelCase`` methods each share one name
    and so shadow one another; and several signatures repeat a parameter
    name (a SyntaxError).  Confirm every body against the upstream
    ``TimmBackbone``.
    """

    # Class-level hooks: main input name, gradient-checkpointing support
    # flag, and the config class.
    __lowerCamelCase : Tuple = "pixel_values"
    __lowerCamelCase : int = False
    __lowerCamelCase : List[Any] = TimmBackboneConfig

    def __init__( self, lowerCamelCase__, **lowerCamelCase__ ):
        """Build the wrapped timm model from the given config."""
        # Requires the `timm` package at construction time.
        requires_backends(self, """timm""" )
        super().__init__(lowerCamelCase__ )
        A : Optional[Any] = config
        if config.backbone is None:
            raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
        if config.backbone not in timm.list_models():
            raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' )
        if hasattr(lowerCamelCase__, """out_features""" ) and config.out_features is not None:
            raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
        A : Optional[int] = getattr(lowerCamelCase__, """use_pretrained_backbone""", lowerCamelCase__ )
        if pretrained is None:
            raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
        # We just take the final layer by default. This matches the default for the transformers models.
        A : Dict = config.out_indices if getattr(lowerCamelCase__, """out_indices""", lowerCamelCase__ ) is not None else (-1,)
        A : Union[str, Any] = timm.create_model(
            config.backbone, pretrained=lowerCamelCase__, features_only=config.features_only, in_chans=config.num_channels, out_indices=lowerCamelCase__, **lowerCamelCase__, )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        A : Tuple = self._backbone.return_layers
        A : Optional[Any] = {layer["""module"""]: str(lowerCamelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(lowerCamelCase__ )

    @classmethod
    def _lowerCAmelCase ( cls, lowerCamelCase__, *lowerCamelCase__, **lowerCamelCase__ ):
        """Alternate constructor mirroring `from_pretrained` for timm backbones."""
        requires_backends(cls, ["""vision""", """timm"""] )
        from ...models.timm_backbone import TimmBackboneConfig

        A : Union[str, Any] = kwargs.pop("""config""", TimmBackboneConfig() )
        A : Union[str, Any] = kwargs.pop("""use_timm_backbone""", lowerCamelCase__ )
        if not use_timm:
            raise ValueError("""use_timm_backbone must be True for timm backbones""" )
        A : Any = kwargs.pop("""num_channels""", config.num_channels )
        A : Any = kwargs.pop("""features_only""", config.features_only )
        A : int = kwargs.pop("""use_pretrained_backbone""", config.use_pretrained_backbone )
        A : int = kwargs.pop("""out_indices""", config.out_indices )
        A : int = TimmBackboneConfig(
            backbone=lowerCamelCase__, num_channels=lowerCamelCase__, features_only=lowerCamelCase__, use_pretrained_backbone=lowerCamelCase__, out_indices=lowerCamelCase__, )
        return super()._from_config(lowerCamelCase__, **lowerCamelCase__ )

    def _lowerCAmelCase ( self, lowerCamelCase__ ):
        # NOTE(review): a no-op here; presumably a freeze/unfreeze hook in the
        # upstream version — verify.
        pass

    def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__ ):
        """Forward pass: return feature maps (and optionally hidden states)."""
        A : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
        A : Any = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        A : Any = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            A : int = self._all_layers
            A : Tuple = self._backbone(lowerCamelCase__, **lowerCamelCase__ )
            A : int = self._return_layers
            A : Tuple = tuple(hidden_states[i] for i in self.out_indices )
        else:
            A : Union[str, Any] = self._backbone(lowerCamelCase__, **lowerCamelCase__ )
            A : str = None
            A : Optional[int] = tuple(lowerCamelCase__ )
            A : str = tuple(lowerCamelCase__ ) if hidden_states is not None else None
        if not return_dict:
            A : Any = (feature_maps,)
            if output_hidden_states:
                A : Dict = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=lowerCamelCase__, hidden_states=lowerCamelCase__, attentions=lowerCamelCase__ )
| 712
|
import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401

# Backwards-compatibility shim: importing this module re-exports the
# pipeline and only emits a deprecation warning pointing at diffusers.
warnings.warn(
    """The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
    """ StableDiffusionInpaintPipeline` instead."""
)
| 520
| 0
|
import requests
from bs4 import BeautifulSoup  # fixed: `bsa` is a typo for the bs4 package


def get_citation(base_url, params):
    """
    Scrape a Google Scholar lookup page and return its citation text.

    Args:
        base_url: the scholar_lookup endpoint URL.
        params: query parameters (title, journal, volume, pages, year, hl).

    Returns:
        The text of the third anchor in the result's `gs_fl` footer
        (the "Cited by N" link on Scholar result pages).
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    # Example lookup: print the citation string for a known paper.
    # Fixed: the dict was bound to a mangled name while the call read `params`.
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 606
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (was previously clobbered: both constants below were
# bound to the same mangled name, so the logger was lost immediately).
logger = logging.get_logger(__name__)

# Map from model identifier to the URL of its hosted config file.
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class __lowerCAmelCase ( PretrainedConfig ):
    """
    Configuration class for a Transformer-XL model (`transfo-xl` family).

    Holds the architecture hyper-parameters; the defaults reproduce the
    `transfo-xl-wt103` checkpoint. Inherits serialization behavior from
    `PretrainedConfig` (the previous base name was undefined in this file).
    """

    # Restored attribute names expected by PretrainedConfig machinery
    # (all three were previously bound to one mangled name, so only the
    # last assignment survived).
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],  # noqa: B006 — mutable default kept for interface parity; copied below
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        """Build the config; every hyper-parameter is stored on `self`."""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # Tie all adaptive-softmax projections except the first cluster.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation.
        logging.get_logger(__name__).info(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation. The setter name
        # must match the property above (it previously referenced an
        # undefined name, which crashed at class-creation time).
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 452
| 0
|
'''simple docstring'''
from math import sqrt
def is_prime(number):
    """Return True iff *number* is prime, by trial division up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
def sieve_er(n):
    """Sieve of Eratosthenes: return the list of primes in [2, n]."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes: zero out every multiple of a surviving value
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def get_prime_numbers(n):
    """Return all primes in [2, n], testing each candidate with `is_prime`."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def prime_factorization(number):
    """Return the list of prime factors of *number* (with multiplicity).

    For 0 and 1 the list is `[number]` itself, matching the original contract.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # NOTE: true division keeps the original behavior; quotient
                # becomes a float but the == / % comparisons stay exact here.
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def greatest_prime_factor(number):
    """Return the largest prime factor of *number*."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def smallest_prime_factor(number):
    """Return the smallest prime factor of *number*."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def is_even(number):
    """Return True iff *number* is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0
def is_odd(number):
    """Return True iff *number* is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0
def goldbach(number):
    """Return two primes whose sum is the even *number* > 2 (Goldbach pair)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1, number2):
    """Euclidean algorithm: greatest common divisor of two non-negative ints."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
def kg_v(number1, number2):
    """Least common multiple (kgV) of two positive ints, via prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                # shared factor: take it with the larger multiplicity
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def get_prime(n):
    """Return the n-th prime number, counting from get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans
def get_primes_between(p_number_1, p_number_2):
    """Return all primes strictly between the two given primes (exclusive)."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n):
    """Return all positive divisors of *n*, in increasing order (1 and n included)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans
def is_perfect_number(number):
    """Return True iff *number* equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator, denominator):
    """Return (numerator, denominator) reduced by their greatest common divisor."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    """Return n! for n >= 0 (0! == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def fib(n):
    """Return the n-th Fibonacci number with fib(1) == fib(2) == 1.

    For n in {0, 1} the loop body never runs and the initial value 1 is
    returned, matching the original iteration count of n - 1.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    fib_1 = 0
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
| 160
|
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_lowerCAmelCase = logging.get_logger(__name__)
class ParallelBackendConfig:
    """Process-wide holder for the joblib backend selected via `parallel_backend`.

    `backend_name` is None when no alternative backend is active, in which
    case `parallel_map` falls back to multiprocessing.
    """

    # Name of the active joblib backend, or None (set by `parallel_backend`).
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply `single_map_nested_func` over `iterable` in parallel.

    Dispatches to a multiprocessing Pool by default, or to joblib when a
    backend has been selected through `parallel_backend`.
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split `iterable` into contiguous chunks and map them across a Pool.

    Each worker receives one (function, sub_iterable, types, rank,
    disable_tqdm, desc) tuple; results are flattened back into one list.
    """
    # Never spawn more workers than there are items.
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        # The first `mod` chunks get one extra element each.
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's write lock with the workers so progress bars don't clash.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    # Flatten the per-process result lists into a single list.
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map `single_map_nested_func` over `iterable` via the selected joblib backend.

    tqdm/desc arguments are accepted for signature parity with the Pool path
    but progress display is not supported here.
    """
    # joblib is imported lazily so it stays an optional dependency.
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Context manager selecting a joblib backend for `parallel_map` calls.

    Sets `ParallelBackendConfig.backend_name` for the duration of the block
    and always resets it to None on exit (fixed: the original bound locals
    instead of the class attribute, so the setting never took effect).
    """
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 160
| 1
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    # Load the series; the CSV is expected to have no header row.
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    # Scale values into [0, 1] before feeding the LSTM.
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10  # timesteps per input window
    forward_days = 5  # timesteps predicted per window
    periods = 20
    # Train/test split point; test keeps `look_back` overlap for windowing.
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    # Build sliding windows: X is `look_back` steps, y is the next `forward_days`.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    # Two stacked LSTM layers followed by a dense multi-step output head.
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 92
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
# Module logger (fixed: both constants were bound to the same mangled name,
# so the docstring assignment immediately clobbered the logger).
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example injected into the pipeline __call__ docstring.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from PIL import Image
        >>> import torch
        >>> from diffusers import DiffusionPipeline
        >>> from diffusers.utils import export_to_gif, load_image

        >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")

        >>> repo = \"openai/shap-e-img2img\"
        >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
        >>> pipe = pipe.to(device)

        >>> guidance_scale = 3.0
        >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
        >>> image = load_image(image_url).convert(\"RGB\")

        >>> images = pipe(
        ...     image,
        ...     guidance_scale=guidance_scale,
        ...     num_inference_steps=64,
        ...     frame_size=256,
        ... ).images

        >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
        ```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output container for the Shap-E pipeline.

    Restored the class name used by the pipeline's return statements and the
    `images` field (the placeholder `A = 42` produced a field-less dataclass,
    so `ShapEPipelineOutput(images=...)` raised a TypeError).
    """

    # Rendered frames, one list of images per prompt.
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]
class lowerCAmelCase__ ( _lowerCAmelCase ):
    """Shap-E image-to-3D diffusion pipeline (prior + CLIP encoder + renderer).

    NOTE(review): this class is heavily name-mangled. Locals are bound to the
    literal name `lowerCamelCase_` while later lines read the original names
    (`latents`, `image`, `image_embeds`, ...), so most dataflow below is
    broken as written; several defects are flagged inline but NOT fixed here.
    Also, every method signature repeats the same parameter name
    (`UpperCamelCase_`), which is a SyntaxError — the original parameter
    lists must be restored before this class can run.
    """

    # Registers prior, image_encoder, image_processor, scheduler, renderer
    # as pipeline modules (all five params share one mangled name — SyntaxError).
    def __init__( self : Dict , UpperCamelCase_ : PriorTransformer , UpperCamelCase_ : CLIPVisionModel , UpperCamelCase_ : CLIPImageProcessor , UpperCamelCase_ : HeunDiscreteScheduler , UpperCamelCase_ : ShapERenderer , ) -> str:
        """simple docstring"""
        super().__init__()
        self.register_modules(
            prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , )

    # prepare_latents: sample (or validate) latents and scale by the scheduler's
    # initial noise sigma.
    # NOTE(review): the randn/scale results are bound to `lowerCamelCase_` and
    # discarded; `return latents` returns the untouched argument (None when
    # no latents were passed in) — broken by mangling.
    def __UpperCamelCase ( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : int ) -> Dict:
        """simple docstring"""
        if latents is None:
            lowerCamelCase_ : str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            lowerCamelCase_ : List[str] = latents.to(UpperCamelCase_ )

        lowerCamelCase_ : int = latents * scheduler.init_noise_sigma
        return latents

    # CPU offload of the image encoder and prior via accelerate.
    def __UpperCamelCase ( self : Optional[Any] , UpperCamelCase_ : List[str]=0 ) -> Optional[int]:
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )

        lowerCamelCase_ : Tuple = torch.device(F"""cuda:{gpu_id}""" )

        lowerCamelCase_ : Tuple = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCamelCase_ , UpperCamelCase_ )

    # _execution_device: when hooks are installed, report the device accelerate
    # will execute on rather than the module's nominal device.
    @property
    def __UpperCamelCase ( self : List[Any] ) -> List[str]:
        """simple docstring"""
        if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(UpperCamelCase_ , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    # _encode_image: CLIP-encode the conditioning image(s); with classifier-free
    # guidance the unconditional (zero) embedding is concatenated in front.
    def __UpperCamelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , ) -> int:
        """simple docstring"""
        if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ):
            lowerCamelCase_ : Any = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 )

        if not isinstance(UpperCamelCase_ , torch.Tensor ):
            lowerCamelCase_ : Any = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )

        lowerCamelCase_ : str = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ )

        lowerCamelCase_ : Union[str, Any] = self.image_encoder(UpperCamelCase_ )['''last_hidden_state''']
        lowerCamelCase_ : Dict = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256

        lowerCamelCase_ : List[str] = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )

        if do_classifier_free_guidance:
            lowerCamelCase_ : Dict = torch.zeros_like(UpperCamelCase_ )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowerCamelCase_ : Dict = torch.cat([negative_image_embeds, image_embeds] )

        return image_embeds

    # Main entry point: denoise prior latents conditioned on the image, then
    # render each latent into view images with the renderer.
    # NOTE(review): `@replace_example_docstring(UpperCamelCase_)` references an
    # undefined name (originally EXAMPLE_DOC_STRING) — NameError at class creation.
    @torch.no_grad()
    @replace_example_docstring(UpperCamelCase_ )
    def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_ : int = 1 , UpperCamelCase_ : int = 25 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[torch.FloatTensor] = None , UpperCamelCase_ : float = 4.0 , UpperCamelCase_ : int = 64 , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ) -> Optional[Any]:
        """simple docstring"""
        # Infer batch size from the image argument (single image, tensor, or list).
        if isinstance(UpperCamelCase_ , PIL.Image.Image ):
            lowerCamelCase_ : List[Any] = 1
        elif isinstance(UpperCamelCase_ , torch.Tensor ):
            lowerCamelCase_ : Union[str, Any] = image.shape[0]
        elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
            lowerCamelCase_ : str = len(UpperCamelCase_ )
        else:
            raise ValueError(
                F"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}""" )

        lowerCamelCase_ : List[Any] = self._execution_device

        lowerCamelCase_ : Optional[Any] = batch_size * num_images_per_prompt

        lowerCamelCase_ : Optional[int] = guidance_scale > 1.0
        lowerCamelCase_ : Any = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )

        # prior

        self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
        lowerCamelCase_ : Dict = self.scheduler.timesteps

        lowerCamelCase_ : List[Any] = self.prior.config.num_embeddings
        lowerCamelCase_ : Optional[int] = self.prior.config.embedding_dim

        lowerCamelCase_ : Dict = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        lowerCamelCase_ : Tuple = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ )

        for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
            # expand the latents if we are doing classifier free guidance
            lowerCamelCase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowerCamelCase_ : List[Any] = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )

            lowerCamelCase_ : Tuple = self.prior(
                UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding

            # remove the variance
            lowerCamelCase_ , lowerCamelCase_ : List[Any] = noise_pred.split(
                scaled_model_input.shape[2] , dim=2 )  # batch_size, num_embeddings, embedding_dim

            # NOTE(review): `do_classifier_free_guidance` is a bool, so
            # `is not None` is always True — the original check was almost
            # certainly a plain truthiness test.
            if do_classifier_free_guidance is not None:
                lowerCamelCase_ , lowerCamelCase_ : Optional[int] = noise_pred.chunk(2 )
                lowerCamelCase_ : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            lowerCamelCase_ : str = self.scheduler.step(
                UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=UpperCamelCase_ )

        lowerCamelCase_ : Dict = []
        for i, latent in enumerate(UpperCamelCase_ ):
            # NOTE(review): bare print() looks like a debug leftover.
            print()
            lowerCamelCase_ : Optional[int] = self.renderer.decode(
                latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )

            images.append(UpperCamelCase_ )

        lowerCamelCase_ : Optional[Any] = torch.stack(UpperCamelCase_ )

        if output_type not in ["np", "pil"]:
            raise ValueError(F"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )

        lowerCamelCase_ : Union[str, Any] = images.cpu().numpy()

        if output_type == "pil":
            lowerCamelCase_ : str = [self.numpy_to_pil(UpperCamelCase_ ) for image in images]

        # Offload last model to CPU
        if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=UpperCamelCase_ )
| 501
| 0
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class a_ :
def __init__( self :List[Any] , _lowercase :Union[str, Any] , _lowercase :List[str]=13 , _lowercase :Union[str, Any]=7 , _lowercase :str=True , _lowercase :Any=True , _lowercase :str=True , _lowercase :int=True , _lowercase :Tuple=99 , _lowercase :Tuple=[1, 1, 2] , _lowercase :str=1 , _lowercase :Optional[int]=32 , _lowercase :Optional[Any]=4 , _lowercase :List[str]=8 , _lowercase :Any=37 , _lowercase :str="gelu_new" , _lowercase :Optional[Any]=0.1 , _lowercase :Dict=0.1 , _lowercase :List[str]=0.0 , _lowercase :List[str]=512 , _lowercase :List[Any]=3 , _lowercase :List[Any]=0.02 , _lowercase :Dict=3 , _lowercase :Dict=4 , _lowercase :Any=None , _lowercase :Dict=False , ) -> int:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = block_sizes
UpperCAmelCase_ = num_decoder_layers
UpperCAmelCase_ = d_model
UpperCAmelCase_ = n_head
UpperCAmelCase_ = d_head
UpperCAmelCase_ = d_inner
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = 2
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
UpperCAmelCase_ = initializer_std
# Used in the tests to check the size of the first attention layer
UpperCAmelCase_ = n_head
# Used in the tests to check the size of the first hidden state
UpperCAmelCase_ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
UpperCAmelCase_ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
UpperCAmelCase_ = self.num_hidden_layers + 2
def prepare_config_and_inputs(self):
    """Build a FunnelConfig and the random input tensors shared by all checks.

    Name restored to match the call sites (``self.model_tester.prepare_config_and_inputs()``).
    Returns ``(config, input_ids, token_type_ids, input_mask, sequence_labels,
    token_labels, choice_labels)``; the optional tensors are ``None`` when the
    corresponding ``use_*`` flag is off.
    """
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)

    config = FunnelConfig(
        vocab_size=self.vocab_size,
        block_sizes=self.block_sizes,
        num_decoder_layers=self.num_decoder_layers,
        d_model=self.d_model,
        n_head=self.n_head,
        d_head=self.d_head,
        d_inner=self.d_inner,
        hidden_act=self.hidden_act,
        hidden_dropout=self.hidden_dropout,
        attention_dropout=self.attention_dropout,
        activation_dropout=self.activation_dropout,
        max_position_embeddings=self.max_position_embeddings,
        type_vocab_size=self.type_vocab_size,
        initializer_std=self.initializer_std,
    )

    return (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    )
def create_and_check_model(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Exercise TFFunnelModel with dict, list and bare-tensor inputs and check shapes,
    also toggling ``truncate_seq`` and ``separate_cls`` off."""
    model = TFFunnelModel(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)

    inputs = [input_ids, input_mask]
    result = model(inputs)

    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    config.truncate_seq = False
    model = TFFunnelModel(config=config)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    config.separate_cls = False
    model = TFFunnelModel(config=config)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
def create_and_check_base_model(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Exercise TFFunnelBaseModel (encoder only): pooled sequence length is 2,
    or 3 when ``truncate_seq`` is disabled."""
    model = TFFunnelBaseModel(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)

    inputs = [input_ids, input_mask]
    result = model(inputs)

    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    config.truncate_seq = False
    model = TFFunnelBaseModel(config=config)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

    config.separate_cls = False
    model = TFFunnelBaseModel(config=config)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
def create_and_check_for_pretraining(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check TFFunnelForPreTraining emits one logit per token."""
    model = TFFunnelForPreTraining(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_masked_lm(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check TFFunnelForMaskedLM logits cover the full vocabulary per token."""
    model = TFFunnelForMaskedLM(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check TFFunnelForSequenceClassification emits one logit per label."""
    config.num_labels = self.num_labels
    model = TFFunnelForSequenceClassification(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_multiple_choice(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check TFFunnelForMultipleChoice: inputs are tiled across the choice axis,
    logits come back as one score per choice."""
    config.num_choices = self.num_choices
    model = TFFunnelForMultipleChoice(config=config)
    multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
    multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
    multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
    inputs = {
        "input_ids": multiple_choice_inputs_ids,
        "attention_mask": multiple_choice_input_mask,
        "token_type_ids": multiple_choice_token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_token_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check TFFunnelForTokenClassification emits per-token label logits."""
    config.num_labels = self.num_labels
    model = TFFunnelForTokenClassification(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check TFFunnelForQuestionAnswering emits per-token start/end span logits."""
    model = TFFunnelForQuestionAnswering(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
    """Adapt :meth:`prepare_config_and_inputs` to the ``(config, inputs_dict)``
    shape the common test mixin expects."""
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_tf
class TFFunnelModelTest(_snake_case, _snake_case, unittest.TestCase):
    """Full (encoder+decoder) TFFunnel model tests.

    The obfuscated version named every method ``__a`` (each shadowing the
    previous) and every attribute ``UpperCamelCase__``; restored to the
    ``setUp``/``test_*`` names unittest and the common mixin dispatch on.
    NOTE(review): the mixin bases were obfuscated to ``_snake_case`` —
    presumably the TF model-tester / pipeline mixins; confirm against imports.
    """

    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(_snake_case, unittest.TestCase):
    """Base (encoder-only) TFFunnel model tests; the tester is built with
    ``base=True``. Renamed from the obfuscated ``a_`` (which shadowed the
    previous test class) and methods restored to ``setUp``/``test_*``."""

    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 561
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
UpperCamelCase_ = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    """PyTorch-Lightning fine-tuning module for seq2seq summarization.

    Restored from an obfuscated version in which every method was named ``__a``
    (so later definitions shadowed earlier ones) and every local was rebound to
    ``UpperCAmelCase_`` while subsequent code read the real names. The class
    name matches the call site in ``main`` below.
    """

    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        # Sortish sampling is incompatible with DDP's default sampler replacement;
        # dynamic batching is single-GPU only and exclusive with sortish sampling.
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        # FSMT keeps separate source/target vocabularies.
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        # Negative counts mean "use the whole split".
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """Debugging utility: dump one batch, decoded to text, next to the raw ids."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        """Decode token ids to whitespace-stripped strings."""
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        """Teacher-forced forward pass; returns a 1-tuple with the LM loss
        (cross-entropy, or label-smoothed NLL when label_smoothing > 0)."""
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, TaForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        """Aggregate per-step losses and generation metrics; appends a row to
        ``self.metrics[prefix]`` which a callback persists."""
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        """Generate with beam search, then score the generations and the
        teacher-forced loss for this batch."""
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        """Build a DataLoader; training may use a sortish sampler or a dynamic
        (max-tokens) batch sampler, eval/test always uses plain batching."""
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    """Translation fine-tuning: same training loop as summarization but scored
    with BLEU and with src/tgt language codes forwarded to the dataset."""

    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    """Build (or reuse) the module, configure logging/early stopping, train,
    and optionally run test on the last checkpoint.

    Renamed from the obfuscated ``A`` to match the ``main(args)`` call in the
    ``__main__`` guard below.
    """
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    # CLI entry point: generic Lightning args + model-specific args, then train.
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
| 561
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( a , unittest.TestCase ):
"""simple docstring"""
A_ = KandinskyVaaImgaImgPipeline
A_ = ['image_embeds', 'negative_image_embeds', 'image']
A_ = [
'image_embeds',
'negative_image_embeds',
'image',
]
A_ = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
A_ = False
@property
def snake_case_( self )-> Optional[Any]:
return 3_2
@property
def snake_case_( self )-> int:
return 3_2
@property
def snake_case_( self )-> Any:
return self.time_input_dim
@property
def snake_case_( self )-> Union[str, Any]:
return self.time_input_dim * 4
@property
def snake_case_( self )-> Dict:
return 1_0_0
@property
def snake_case_( self )-> List[str]:
torch.manual_seed(0 )
lowercase__ = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase__ = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def snake_case_( self )-> str:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_( self )-> Tuple:
torch.manual_seed(0 )
lowercase__ = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_( self )-> List[str]:
lowercase__ = self.dummy_unet
lowercase__ = self.dummy_movq
lowercase__ = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowercase__ = DDIMScheduler(**_lowerCamelCase )
lowercase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def snake_case_( self , _lowerCamelCase , _lowerCamelCase=0 )-> List[str]:
lowercase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
lowercase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
lowercase__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
if str(_lowerCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(_lowerCamelCase )
else:
lowercase__ = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
lowercase__ = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 1_0,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def snake_case_( self )-> Optional[Any]:
    """Smoke-test the pipeline on dummy components and check an output slice.

    BUG FIX: intermediates were bound to throwaway names while later lines
    read undefined names (`pipe`, `output`, `image`, ...); bindings restored.
    """
    device = '''cpu'''
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components )
    pipe = pipe.to(device )
    # presumably disable=None as in standard diffusers tests — TODO confirm
    pipe.set_progress_bar_config(disable=None )
    output = pipe(**self.get_dummy_inputs(device ) )
    image = output.images
    # presumably return_dict=False for the tuple variant — TODO confirm
    image_from_tuple = pipe(
        **self.get_dummy_inputs(device ) , return_dict=False , )[0]
    image_slice = image[0, -3:, -3:, -1]
    image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
    assert image.shape == (1, 6_4, 6_4, 3)
    expected_slice = np.array(
        [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] )
    assert (
        np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
    assert (
        np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
    """Integration test: Kandinsky 2.2 img2img against a reference image."""

    def snake_case_( self )-> List[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case_( self )-> Any:
        # NOTE(review): both methods share the name `snake_case_`, so this one
        # shadows the cleanup above and neither is registered as tearDown —
        # confirm the intended unittest method names.
        # BUG FIXES: intermediates were bound to throwaway names while later
        # statements read undefined names; `torch.floataa` does not exist
        # (fp16 intended).
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        prompt = '''A red cartoon frog, 4k'''
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
        # NOTE(review): assumes the standard `torch_device` test helper — confirm
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image , expected_image )
| 161
|
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( a ,unittest.TestCase ):
    '''Tests for the CodeGen slow/fast tokenizers (BPE round-trips, padding,
    BOS handling and truncation patterns).

    NOTE(review): many assignments below bind to the throwaway name
    `_UpperCAmelCase` while later statements read `self.vocab_file`,
    `tokens`, `out_s`, etc., and several calls reference the undefined
    name `A` — the assignment targets were presumably mangled; confirm
    against the upstream transformers test.
    '''

    # NOTE(review): all five class attributes rebind the same name `a__` —
    # presumably tokenizer_class / rust_tokenizer_class / test_rust_tokenizer
    # / special_tokens_map / from_pretrained_kwargs in the original.
    a__ =CodeGenTokenizer
    a__ =CodeGenTokenizerFast
    a__ =True
    a__ ={'''add_prefix_space''': True}
    a__ =False

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        '''Write a tiny BPE vocab and merges file into the temp dir.'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        _UpperCAmelCase : List[str] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
            '''<|endoftext|>''',
        ]
        _UpperCAmelCase : str = dict(zip(A , range(len(A ) ) ) )
        _UpperCAmelCase : Dict = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        _UpperCAmelCase : Union[str, Any] = {'''unk_token''': '''<unk>'''}
        _UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        _UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(A ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(A ) )

    def __lowerCAmelCase ( self , **A ) -> Tuple:
        '''Build a slow tokenizer from the temp dir, merging special tokens.'''
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **A )

    def __lowerCAmelCase ( self , **A ) -> List[Any]:
        '''Build a fast tokenizer from the temp dir, merging special tokens.'''
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **A )

    def __lowerCAmelCase ( self , A ) -> Dict:
        '''Return a (input_text, output_text) sample pair.'''
        _UpperCAmelCase : List[Any] = '''lower newer'''
        _UpperCAmelCase : Dict = '''lower newer'''
        return input_text, output_text

    def __lowerCAmelCase ( self ) -> Tuple:
        '''Tokenize a sample and check tokens and their ids.'''
        _UpperCAmelCase : int = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        _UpperCAmelCase : Optional[int] = '''lower newer'''
        _UpperCAmelCase : Tuple = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        _UpperCAmelCase : List[Any] = tokenizer.tokenize(A , add_prefix_space=A )
        self.assertListEqual(A , A )
        _UpperCAmelCase : Dict = tokens + [tokenizer.unk_token]
        _UpperCAmelCase : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )

    def __lowerCAmelCase ( self ) -> List[Any]:
        '''Check slow and fast tokenizers agree on tokens, ids and unk handling.'''
        if not self.test_rust_tokenizer:
            return
        _UpperCAmelCase : Optional[int] = self.get_tokenizer()
        _UpperCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=A )
        _UpperCAmelCase : Dict = '''lower newer'''
        # Testing tokenization
        _UpperCAmelCase : Optional[Any] = tokenizer.tokenize(A , add_prefix_space=A )
        _UpperCAmelCase : Any = rust_tokenizer.tokenize(A )
        self.assertListEqual(A , A )
        # Testing conversion to ids without special tokens
        _UpperCAmelCase : Dict = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
        _UpperCAmelCase : List[str] = rust_tokenizer.encode(A , add_special_tokens=A )
        self.assertListEqual(A , A )
        # Testing conversion to ids with special tokens
        _UpperCAmelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=A )
        _UpperCAmelCase : List[str] = tokenizer.encode(A , add_prefix_space=A )
        _UpperCAmelCase : Tuple = rust_tokenizer.encode(A )
        self.assertListEqual(A , A )
        # Testing the unknown token
        _UpperCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
        _UpperCAmelCase : int = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A ) , A )

    def __lowerCAmelCase ( self , *A , **A ) -> List[str]:
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def __lowerCAmelCase ( self , A=1_5 ) -> List[Any]:
        '''Padding without a pad token must raise for all encode entry points.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                _UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(A , **A )
                # Simple input
                _UpperCAmelCase : str = '''This is a simple input'''
                _UpperCAmelCase : Tuple = ['''This is a simple input 1''', '''This is a simple input 2''']
                _UpperCAmelCase : Optional[Any] = ('''This is a simple input''', '''This is a pair''')
                _UpperCAmelCase : str = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='''max_length''' )
                # Simple input
                self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='''max_length''' , )
                # Pair input
                self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='''max_length''' )
                # Pair input
                self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='''max_length''' , )

    def __lowerCAmelCase ( self ) -> List[str]:
        '''Padding behaviour when a pad token is configured on the slow tokenizer.'''
        _UpperCAmelCase : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
        # Simple input
        _UpperCAmelCase : Optional[int] = '''This is a simple input'''
        _UpperCAmelCase : Dict = ['''This is a simple input looooooooong''', '''This is a simple input''']
        _UpperCAmelCase : Union[str, Any] = ('''This is a simple input''', '''This is a pair''')
        _UpperCAmelCase : Optional[Any] = [
            ('''This is a simple input loooooong''', '''This is a simple input'''),
            ('''This is a simple pair loooooong''', '''This is a simple pair'''),
        ]
        _UpperCAmelCase : List[str] = tokenizer.pad_token_id
        _UpperCAmelCase : Tuple = tokenizer(A , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' )
        _UpperCAmelCase : Optional[Any] = tokenizer(A , padding=A , truncate=A , return_tensors='''np''' )
        _UpperCAmelCase : int = tokenizer(*A , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' )
        _UpperCAmelCase : List[str] = tokenizer(A , padding=A , truncate=A , return_tensors='''np''' )
        # s
        # test single string max_length padding
        self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 )
        self.assertTrue(pad_token_id in out_s['''input_ids'''] )
        self.assertTrue(0 in out_s['''attention_mask'''] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
        self.assertFalse(0 in out_sa['''attention_mask'''][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
        self.assertTrue(0 in out_sa['''attention_mask'''][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 )
        self.assertTrue(pad_token_id in out_p['''input_ids'''] )
        self.assertTrue(0 in out_p['''attention_mask'''] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
        self.assertFalse(0 in out_pa['''attention_mask'''][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
        self.assertTrue(0 in out_pa['''attention_mask'''][1] )

    def __lowerCAmelCase ( self ) -> List[str]:
        '''A custom BOS token must be prepended to every encoded sequence.'''
        _UpperCAmelCase : Optional[int] = '''$$$'''
        _UpperCAmelCase : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A , add_bos_token=A )
        _UpperCAmelCase : Tuple = '''This is a simple input'''
        _UpperCAmelCase : int = ['''This is a simple input 1''', '''This is a simple input 2''']
        _UpperCAmelCase : List[str] = tokenizer.bos_token_id
        _UpperCAmelCase : str = tokenizer(A )
        _UpperCAmelCase : Optional[Any] = tokenizer(A )
        self.assertEqual(out_s.input_ids[0] , A )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        _UpperCAmelCase : Tuple = tokenizer.decode(out_s.input_ids )
        _UpperCAmelCase : List[str] = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , A )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )

    @slow
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        '''decode() with truncate_before_pattern must cut at the given regexes.'''
        _UpperCAmelCase : Optional[Any] = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
        _UpperCAmelCase : Any = '''\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#'''
        _UpperCAmelCase : Union[str, Any] = '''\nif len_a > len_b: result = a\nelse: result = b'''
        _UpperCAmelCase : Any = tokenizer.encode(A )
        _UpperCAmelCase : Tuple = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
        _UpperCAmelCase : List[str] = tokenizer.decode(A , truncate_before_pattern=A )
        self.assertEqual(A , A )

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        pass
| 506
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
# Chat-style prompt template; `<<task>>` is substituted with the user task.
snake_case = """
Human: <<task>>
Assistant: """
# NOTE(review): all three constants here rebind the same name `snake_case`,
# so only the last assignment survives; the function below reads
# DEFAULT_PROMPTS_REPO / PROMPT_FILES, which are never defined under those
# names — confirm the intended constant names.
snake_case = """huggingface-tools/default-prompts"""
snake_case = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def UpperCAmelCase_ ( prompt_or_repo_id , agent_name , mode="run" ):
    """Return the prompt text for *agent_name*.

    If *prompt_or_repo_id* contains whitespace it is treated as a literal
    prompt and returned unchanged; otherwise it is treated as a dataset repo
    id from which the template for *mode* ("run" or "chat") is downloaded.

    BUG FIXES: the original signature repeated the same parameter name three
    times (a SyntaxError), and the None-default repo id was bound to a
    throwaway local instead of rebinding the argument.
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s" , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
    with open(prompt_file , "r" , encoding="utf-8" ) as f:
        return f.read()
| 568
|
'''simple docstring'''
snake_case = 8.3144598
def UpperCAmelCase_ ( temperature , molar_mass ):
    """Return the root-mean-square speed of a gas molecule: vrms = sqrt(3RT/M).

    temperature is in kelvin, molar_mass in kg/mol.

    BUG FIXES: the original signature repeated one parameter name (a
    SyntaxError) and referenced the undefined UNIVERSAL_GAS_CONSTANT; the
    constant is now bound locally. ValueError (a subclass of the originally
    raised Exception, so existing handlers still work) is raised for invalid
    inputs.
    """
    UNIVERSAL_GAS_CONSTANT = 8.3144598  # J/(mol*K)
    if temperature < 0:
        raise ValueError("Temperature cannot be less than 0 K" )
    if molar_mass <= 0:
        raise ValueError("Molar mass cannot be less than or equal to 0 kg/mol" )
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    # NOTE(review): both constants below rebind `snake_case`, and the call
    # reads `rms_speed_of_molecule`, `temperature`, `molar_mass` and `vrms`,
    # none of which are defined under those names here — this block raises
    # NameError as written; confirm the intended names.
    snake_case = 3_00
    snake_case = 28
    snake_case = rms_speed_of_molecule(temperature, molar_mass)
    print(f'Vrms of Nitrogen gas at 300 K is {vrms} m/s')
| 568
| 1
|
from functools import lru_cache
@lru_cache
def UpperCamelCase_ ( lowerCAmelCase__ ):
    """Return the factorial of *lowerCAmelCase__*, memoised via lru_cache.

    Raises:
        ValueError: if the input is negative.
    """
    if lowerCAmelCase__ < 0:
        raise ValueError("Number should not be negative." )
    # Iterative product instead of recursion; 0! and 1! both yield 1.
    product = 1
    for factor in range(2 , lowerCAmelCase__ + 1 ):
        product *= factor
    return product
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 424
|
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( A__ , unittest.TestCase ):
    '''Tests for the LXMERT slow/fast (WordPiece) tokenizers.

    NOTE(review): assignments below bind to the throwaway name
    `UpperCamelCase__` while later statements read `self.vocab_file`,
    `tokenizer`, `tokens`, etc., and assertions reference the undefined
    `UpperCamelCase_` — confirm against the upstream transformers test.
    '''

    # NOTE(review): all four attributes rebind the same name `_a` —
    # presumably tokenizer_class / rust_tokenizer_class / test_rust_tokenizer
    # / space_between_special_tokens in the original.
    _a = LxmertTokenizer
    _a = LxmertTokenizerFast
    _a = True
    _a = True

    def lowerCAmelCase__ ( self ):
        '''Write a tiny WordPiece vocab file into the temp dir.'''
        super().setUp()
        UpperCamelCase__ :Any = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        UpperCamelCase__ :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        '''Return a (input_text, output_text) sample pair.'''
        UpperCamelCase__ :Optional[Any] = '''UNwant\u00E9d,running'''
        UpperCamelCase__ :Union[str, Any] = '''unwanted, running'''
        return input_text, output_text

    def lowerCAmelCase__ ( self ):
        '''Tokenize a sample and check tokens and their ids.'''
        UpperCamelCase__ :List[Any] = self.tokenizer_class(self.vocab_file )
        UpperCamelCase__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(UpperCamelCase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [7, 4, 5, 10, 8, 9] )

    def lowerCAmelCase__ ( self ):
        '''Check slow and fast tokenizers agree on tokens and ids.'''
        if not self.test_rust_tokenizer:
            return
        UpperCamelCase__ :str = self.get_tokenizer()
        UpperCamelCase__ :Union[str, Any] = self.get_rust_tokenizer()
        UpperCamelCase__ :int = '''I was born in 92000, and this is falsé.'''
        UpperCamelCase__ :Optional[Any] = tokenizer.tokenize(UpperCamelCase_ )
        UpperCamelCase__ :str = rust_tokenizer.tokenize(UpperCamelCase_ )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
        UpperCamelCase__ :Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
        UpperCamelCase__ :int = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
        UpperCamelCase__ :Optional[int] = self.get_rust_tokenizer()
        UpperCamelCase__ :Any = tokenizer.encode(UpperCamelCase_ )
        UpperCamelCase__ :Optional[int] = rust_tokenizer.encode(UpperCamelCase_ )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
| 189
| 0
|
def __A(lowerCAmelCase ) -> list[list[int]]:
    """Return all permutations of the list *lowerCAmelCase* (rotate-and-recurse).

    BUG FIX: the body referenced the undefined names `nums` and `permute`;
    it now uses the actual parameter and recurses via __A.
    """
    result = []
    if len(lowerCAmelCase ) == 1:
        return [lowerCAmelCase.copy()]
    for _ in range(len(lowerCAmelCase ) ):
        # Fix the head element, permute the remainder, then rotate it back.
        head = lowerCAmelCase.pop(0 )
        permutations = __A(lowerCAmelCase )
        for perm in permutations:
            perm.append(head )
        result.extend(permutations )
        lowerCAmelCase.append(head )
    return result
def __A(lowerCAmelCase ) -> Optional[int]:
    """Return all permutations of *lowerCAmelCase* via in-place backtracking.

    BUG FIX: every assignment in the original was bound to a throwaway name,
    so the element swap, the output list and the closure variables never
    existed; the bindings are restored.
    """
    def backtrack(start ):
        if start == len(lowerCAmelCase ) - 1:
            output.append(lowerCAmelCase[:] )
        else:
            for i in range(start , len(lowerCAmelCase ) ):
                # swap element i into the current position
                lowerCAmelCase[i], lowerCAmelCase[start] = lowerCAmelCase[start], lowerCAmelCase[i]
                backtrack(start + 1 )
                lowerCAmelCase[i], lowerCAmelCase[start] = lowerCAmelCase[start], lowerCAmelCase[i]  # backtrack

    output = []
    backtrack(0 )
    return output
if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    # NOTE(review): `permutea` and `res` are not defined in this file (both
    # permutation functions above are named `__A`) — this block raises
    # NameError as written; confirm the intended names.
    lowerCamelCase__ = permutea([1, 2, 3])
    print(res)
    doctest.testmod()
| 202
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for the YOLOS model, following the standard
# transformers pattern: heavy submodules are only imported on first attribute
# access via _LazyModule.
# NOTE(review): every assignment below rebinds the same `lowerCamelCase__`
# name, clobbering this import-structure dict, while `_LazyModule` at the
# bottom reads the undefined `_import_structure` — the original presumably
# extended a single `_import_structure` dict; confirm the intended names.
lowerCamelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = ["YolosFeatureExtractor"]
    lowerCamelCase__ = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )
else:
    import sys

    lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 202
| 1
|
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _lowerCAmelCase ( a ):
    '''Tests for FeaturesManager.determine_framework (PyTorch vs TensorFlow
    selection from explicit argument, local checkpoint type, or environment).

    NOTE(review): assignments below bind to the throwaway name
    `lowerCAmelCase__` while later statements read `self.test_model`,
    `self.framework_pt`, `model_pt`, etc.; every method is also named
    `snake_case`, so the later definitions shadow the earlier ones — confirm
    against the upstream transformers test.
    '''

    def snake_case ( self ):
        '''Record the small test model id and the framework constants.'''
        lowerCAmelCase__ :Tuple = SMALL_MODEL_IDENTIFIER
        lowerCAmelCase__ :List[Any] = 'pt'
        lowerCAmelCase__ :Optional[Any] = 'tf'

    def snake_case ( self , __UpperCAmelCase ):
        '''Save a PyTorch checkpoint of the test model into the given dir.'''
        lowerCAmelCase__ :Optional[Any] = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(__UpperCAmelCase )

    def snake_case ( self , __UpperCAmelCase ):
        '''Save a TensorFlow checkpoint of the test model into the given dir.'''
        lowerCAmelCase__ :Dict = TFAutoModel.from_pretrained(self.test_model , from_pt=__UpperCAmelCase )
        model_tf.save_pretrained(__UpperCAmelCase )

    def snake_case ( self ):
        '''An explicitly provided framework must always be returned as-is.'''
        lowerCAmelCase__ :Tuple = 'mock_framework'
        # Framework provided - return whatever the user provides
        lowerCAmelCase__ :Dict = FeaturesManager.determine_framework(self.test_model , __UpperCAmelCase )
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(__UpperCAmelCase )
            lowerCAmelCase__ :int = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(__UpperCAmelCase )
            lowerCAmelCase__ :List[Any] = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )

    def snake_case ( self ):
        '''The checkpoint type must decide the framework when none is given.'''
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(__UpperCAmelCase )
            lowerCAmelCase__ :List[Any] = FeaturesManager.determine_framework(__UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(__UpperCAmelCase )
            lowerCAmelCase__ :List[Any] = FeaturesManager.determine_framework(__UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(__UpperCAmelCase ):
                lowerCAmelCase__ :Union[str, Any] = FeaturesManager.determine_framework(__UpperCAmelCase )

    def snake_case ( self ):
        '''Framework availability in the environment must drive the choice.'''
        lowerCAmelCase__ :Dict = MagicMock(return_value=__UpperCAmelCase )
        with patch('transformers.onnx.features.is_tf_available' , __UpperCAmelCase ):
            lowerCAmelCase__ :List[Any] = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(__UpperCAmelCase , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        lowerCAmelCase__ :List[Any] = MagicMock(return_value=__UpperCAmelCase )
        with patch('transformers.onnx.features.is_torch_available' , __UpperCAmelCase ):
            lowerCAmelCase__ :Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(__UpperCAmelCase , self.framework_tf )
        # Both in environment -> use PyTorch
        lowerCAmelCase__ :Any = MagicMock(return_value=__UpperCAmelCase )
        lowerCAmelCase__ :int = MagicMock(return_value=__UpperCAmelCase )
        with patch('transformers.onnx.features.is_tf_available' , __UpperCAmelCase ), patch(
            'transformers.onnx.features.is_torch_available' , __UpperCAmelCase ):
            lowerCAmelCase__ :str = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(__UpperCAmelCase , self.framework_pt )
        # Both not in environment -> raise error
        lowerCAmelCase__ :Optional[Any] = MagicMock(return_value=__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = MagicMock(return_value=__UpperCAmelCase )
        with patch('transformers.onnx.features.is_tf_available' , __UpperCAmelCase ), patch(
            'transformers.onnx.features.is_torch_available' , __UpperCAmelCase ):
            with self.assertRaises(__UpperCAmelCase ):
                lowerCAmelCase__ :Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
| 93
|
'''simple docstring'''
import os
from collections.abc import Iterator
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ = "." ) -> Iterator[str]:
    """Yield paths of all .py/.ipynb files under *SCREAMING_SNAKE_CASE__*,
    skipping __init__.py, the `scripts` directory and hidden/underscore dirs.

    BUG FIXES: the pruned directory list was bound to a throwaway name instead
    of back into `dir_names` (so os.walk never pruned), the extension was
    taken from the root argument instead of the filename, and the yielded
    path joined the root with itself instead of dir_path/filename.
    """
    for dir_path, dir_names, filenames in os.walk(SCREAMING_SNAKE_CASE__ ):
        # In-place slice assignment so os.walk skips the pruned directories.
        dir_names[:] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip('''./''' )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> List[str]:
    """Return the markdown prefix for nesting depth *SCREAMING_SNAKE_CASE__*:
    an indented bullet for depth > 0, a top-level header otherwise.

    BUG FIX: the body referenced the undefined name `i` instead of the
    parameter.
    """
    return F'{SCREAMING_SNAKE_CASE__ * "  "}*' if SCREAMING_SNAKE_CASE__ else "\n##"
def _UpperCamelCase ( old_path , new_path ) -> str:
    """Print a markdown header for each component of *new_path* that differs
    from *old_path*, then return *new_path*.

    BUG FIXES: the original repeated the same parameter name twice (a
    SyntaxError) and bound `old_parts` to a throwaway local.
    NOTE(review): `md_prefix` is not defined under that name in this file
    (the helper above is `_UpperCamelCase`) — confirm the intended helper.
    """
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(F'{md_prefix(i )} {new_part.replace("_" , " " ).title()}' )
    return new_path
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ = "." ) -> None:
    """Print a markdown directory index of all good files under the root.

    BUG FIXES: intermediates were bound to throwaway names while later lines
    read them, and the url/link f-strings contained a literal `(unknown)`
    placeholder where the filename belongs.
    NOTE(review): `good_file_paths`, `print_path` and `md_prefix` are not
    defined under those names in this file — confirm the intended helpers.
    """
    old_path = ''''''
    for filepath in sorted(good_file_paths(SCREAMING_SNAKE_CASE__ ) ):
        filepath, filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = F'{filepath}/{filename}'.replace(''' ''' , '''%20''' )
        filename = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
        print(F'{md_prefix(indent )} [{filename}]({url})' )
if __name__ == "__main__":
    # NOTE(review): `print_directory_md` is not defined under that name here
    # (the builder above is `_UpperCamelCase`) — this raises NameError as
    # written; confirm the intended entry point.
    print_directory_md(".")
| 638
| 0
|
"""simple docstring"""
def __lowerCamelCase ( SCREAMING_SNAKE_CASE ) -> bool:
    """Return True iff *SCREAMING_SNAKE_CASE* is a perfect number (equals the
    sum of its proper divisors).

    BUG FIXES: the body referenced the undefined name `number` instead of the
    parameter, and non-positive inputs are now rejected explicitly (0 would
    otherwise compare the empty divisor sum 0 == 0 and wrongly return True).
    """
    if SCREAMING_SNAKE_CASE <= 0:
        return False
    return sum(i for i in range(1 , SCREAMING_SNAKE_CASE // 2 + 1 ) if SCREAMING_SNAKE_CASE % i == 0 ) == SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    print('''Program to check whether a number is a Perfect number or not...''')
    # NOTE(review): the f-string below reads `number` and calls `perfect`,
    # neither of which is defined under those names here (the input is bound
    # to `lowerCAmelCase_`, the checker is `__lowerCamelCase`) — confirm the
    # intended names.
    lowerCAmelCase_ = int(input('''Enter number: ''').strip())
    print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
| 706
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __lowerCamelCase ( SCREAMING_SNAKE_CASE ) -> str:
    """Drop fairseq bookkeeping keys from state dict *SCREAMING_SNAKE_CASE*
    in place (missing keys are ignored).

    BUG FIXES: the key list was bound to a throwaway name while the loop read
    the undefined `ignore_keys`, and pop() was called with the state dict as
    both arguments instead of (key, default).
    """
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        SCREAMING_SNAKE_CASE.pop(k , None )
def __lowerCamelCase ( SCREAMING_SNAKE_CASE ) -> List[Any]:
    """Build a bias-free nn.Linear sharing the weights of embedding
    *SCREAMING_SNAKE_CASE* (used as an output projection).

    BUG FIX: the shape unpacking, the layer and the weight assignment were
    all bound to throwaway names; the bindings are restored and bias is
    explicitly disabled.
    """
    vocab_size, emb_size = SCREAMING_SNAKE_CASE.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = SCREAMING_SNAKE_CASE.weight.data
    return lin_layer
def __lowerCamelCase ( SCREAMING_SNAKE_CASE ) -> int:
    """Load a fairseq M2M-100 checkpoint and build a MaMaaaForConditionalGeneration.

    NOTE(review): every binding below goes to the throwaway name
    `_UpperCAmelCase` while later lines read `mam_aaa`, `args`, `state_dict`
    and `model`, and the sibling helpers `remove_ignore_keys_` /
    `make_linear_from_emb` are not defined under those names here (both are
    `__lowerCamelCase`) — confirm against the upstream conversion script.
    """
    _UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE,map_location='cpu' )
    _UpperCAmelCase = mam_aaa['args'] or mam_aaa['cfg']['model']
    _UpperCAmelCase = mam_aaa['model']
    remove_ignore_keys_(SCREAMING_SNAKE_CASE )
    _UpperCAmelCase = state_dict['encoder.embed_tokens.weight'].shape[0]
    _UpperCAmelCase = MaMaaaConfig(
        vocab_size=SCREAMING_SNAKE_CASE,max_position_embeddings=1024,encoder_layers=args.encoder_layers,decoder_layers=args.decoder_layers,encoder_attention_heads=args.encoder_attention_heads,decoder_attention_heads=args.decoder_attention_heads,encoder_ffn_dim=args.encoder_ffn_embed_dim,decoder_ffn_dim=args.decoder_ffn_embed_dim,d_model=args.encoder_embed_dim,encoder_layerdrop=args.encoder_layerdrop,decoder_layerdrop=args.decoder_layerdrop,dropout=args.dropout,attention_dropout=args.attention_dropout,activation_dropout=args.activation_dropout,activation_function='relu',)
    _UpperCAmelCase = state_dict['decoder.embed_tokens.weight']
    _UpperCAmelCase = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE )
    model.model.load_state_dict(SCREAMING_SNAKE_CASE,strict=SCREAMING_SNAKE_CASE )
    _UpperCAmelCase = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    # BUG FIXES: the parser and parsed args were bound to throwaway names
    # while later lines read `parser`/`args`, and `args.fairseq_pathß`
    # contained a stray 'ß' character.
    # NOTE(review): `convert_fairseq_mamaaa_checkpoint_from_disk` is not
    # defined under that name in this file (the converter above is
    # `__lowerCamelCase`) — confirm the intended function name.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 494
| 0
|
import numpy as np
class _A :
    """A grid cell for A*: position, parent link and f/g/h costs."""

    def __init__( self : Any ) -> Tuple:
        # BUG FIX: every field was bound to a throwaway local, so the
        # attributes read by __eq__ and the search (position, parent, g, h, f)
        # never existed on the instance.
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> List[Any]:
        # BUG FIX: compared against the undefined name `cell` instead of the
        # parameter.
        return self.position == __SCREAMING_SNAKE_CASE.position

    def _a ( self : str ) -> Any:
        """Print this cell's position (debug helper)."""
        print(self.position )
class _A :
    """A 2-D grid world for the A* demo."""

    def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=(5, 5) ) -> int:
        # BUG FIX: the grid and limits were bound to throwaway locals while
        # other methods (and the demo script) read self.w and the limits.
        self.w = np.zeros(__SCREAMING_SNAKE_CASE )
        self.world_x_limit = __SCREAMING_SNAKE_CASE[0]
        self.world_y_limit = __SCREAMING_SNAKE_CASE[1]

    def _a ( self : Optional[Any] ) -> Tuple:
        """Print the world grid (debug helper).

        NOTE(review): shadowed by the second `_a` below; the search also
        calls `get_neigbours`, which does not exist under that name here —
        confirm the intended method names.
        """
        print(self.w )

    def _a ( self : int , __SCREAMING_SNAKE_CASE : Tuple ) -> int:
        """Return the in-bounds neighbour cells of *__SCREAMING_SNAKE_CASE*."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = __SCREAMING_SNAKE_CASE.position[0]
        current_y = __SCREAMING_SNAKE_CASE.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                # BUG FIX: the neighbour cell and its fields were bound to
                # throwaway locals instead of a cell instance's attributes.
                # NOTE(review): `Cell` is not defined under that name here
                # (the cell class above is `_A`) — confirm the intended name.
                neighbour = Cell()
                neighbour.position = (x, y)
                neighbour.parent = __SCREAMING_SNAKE_CASE
                neighbours.append(neighbour )
        return neighbours
def lowercase__ ( world , start , goal ) -> str:
    """Run A* from *start* to *goal* on *world*; return the position path.

    BUG FIXES: the original signature repeated the parameter name `A_` three
    times (a SyntaxError), and every cost/bookkeeping assignment was bound to
    a throwaway local instead of the open list, the current cell or the
    neighbour's g/h/f fields.
    """
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        # Expand the open cell with the smallest f-score.
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            # Squared-distance heuristic, as in the original demo.
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n )
    # Walk parent links back from the goal to reconstruct the path.
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
if __name__ == "__main__":
    # NOTE(review): `Gridworld`, `Cell`, `astar`, `start`, `goal`, `world` and
    # `s` are not defined under those names in this file (the classes are both
    # `_A`, the search is `lowercase__`), and every binding below rebinds the
    # throwaway name `__A` — this block raises NameError as written; confirm
    # the intended names.
    __A = Gridworld()
    # Start position and goal
    __A = Cell()
    __A = (0, 0)
    __A = Cell()
    __A = (4, 4)
    print(F"""path from {start.position} to {goal.position}""")
    __A = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        __A = 1
    print(world.w)
| 68
|
'''simple docstring'''
# Notebook bootstrap cell (Korean): installs transformers + datasets, with a
# commented-out alternative to install from source.
_lowercase = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
# Cells prepended to every generated notebook.
# NOTE(review): all three constants rebind the same name `_lowercase`, and
# `INSTALL_CONTENT` is not defined under that name here — confirm the
# intended constant names.
_lowercase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
# Placeholder substitutions used when rendering the doc templates.
_lowercase = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
| 5
| 0
|
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # CLI for converting an original ControlNet checkpoint to diffusers format.
    # NOTE(review): the parser is bound to the throwaway name `_UpperCamelCase`
    # while every add_argument call below reads `parser` — confirm the
    # intended binding.
    _UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument(
        '--original_config_file',
        type=str,
        required=True,
        help='The YAML config file corresponding to the original architecture.',
    )
    parser.add_argument(
        '--num_in_channels',
        default=None,
        type=int,
        help='The number of input channels. If `None` number of input channels will be automatically inferred.',
    )
    parser.add_argument(
        '--image_size',
        default=512,
        type=int,
        help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--extract_ema',
        action='store_true',
        help=(
            'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
            ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
            ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
        ),
    )
    parser.add_argument(
        '--upcast_attention',
        action='store_true',
        help=(
            'Whether the attention computation should always be upcasted. This is necessary when running stable'
            ' diffusion 2.1.'
        ),
    )
    parser.add_argument(
        '--from_safetensors',
        action='store_true',
        help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
    )
    parser.add_argument(
        '--to_safetensors',
        action='store_true',
        help='Whether to store pipeline in safetensors format or not.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def snake_case (A_ :Optional[int] ):
'''simple docstring'''
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f'''could not parse string as bool {string}''' )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
_UpperCamelCase : int = parser.parse_args()
_UpperCamelCase : Optional[int] = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 118
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase : Optional[Any] = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 118
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 621
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def lowercase__ ( ):
_SCREAMING_SNAKE_CASE : dict[int, int] = {}
_SCREAMING_SNAKE_CASE : List[Any] = 2
while True:
_SCREAMING_SNAKE_CASE : List[Any] = factor_map.pop(lowerCamelCase, lowerCamelCase )
if factor:
_SCREAMING_SNAKE_CASE : str = factor + prime
while x in factor_map:
x += factor
_SCREAMING_SNAKE_CASE : Union[str, Any] = factor
else:
_SCREAMING_SNAKE_CASE : Optional[int] = prime
yield prime
prime += 1
def lowercase__ ( lowerCamelCase = 1E10 ):
_SCREAMING_SNAKE_CASE : Dict = sieve()
_SCREAMING_SNAKE_CASE : Dict = 1
while True:
_SCREAMING_SNAKE_CASE : int = next(lowerCamelCase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(lowerCamelCase )
n += 2
if __name__ == "__main__":
print(solution())
| 621
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class a__(PretrainedConfig):
    """Configuration for the RetriBERT retrieval model (two BERT-style
    encoders plus a projection layer).

    NOTE(review): the class name looks machine-mangled; the upstream
    transformers class is ``RetriBertConfig``. Kept as ``a__`` so any
    existing references keep working.
    """

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        """Store the encoder hyper-parameters; ``share_encoders`` controls
        whether the query and document towers share weights, and
        ``projection_dim`` is the size of the retrieval embedding."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """Build a VideoMAEConfig for ``model_name``.

    Sets the architecture sizes, disables mean pooling for pretraining-only
    checkpoints, and attaches id2label/label2id mappings for fine-tuned ones.

    Raises:
        ValueError: if a fine-tuned name contains neither 'kinetics' nor 'ssv2'.
    """
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        # pretraining checkpoints use the final [CLS]-style norm, not pooling
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    """Fill in encoder/decoder sizes on ``config`` for the given model size.

    "base" is the config's default, so only "small"/"large"/"huge" mutate the
    config; a name containing none of the four size tokens is rejected.

    Raises:
        ValueError: if the name contains none of small/base/large/huge.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    """Map one original VideoMAE state-dict key to its HF-transformers name.

    Applies a sequence of substring rewrites; order matters (e.g. the
    "decoder" checks must run before the generic "norm"/"head" fallbacks).
    """
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
def _a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple ):
    """Re-key a VideoMAE state dict, splitting fused qkv weights.

    NOTE(review): this block looks machine-mangled — upstream the parameters
    are ``(orig_state_dict, config)`` and every ``__lowerCAmelCase = ...``
    line assigned a named target (``val``, ``key_split``, ``dim``, ``prefix``,
    and renamed ``orig_state_dict`` entries for the q/k/v slices). As written,
    each computed value is bound to the same throwaway name and discarded.
    Code left byte-identical; verify against the upstream conversion script.
    """
    for key in orig_state_dict.copy().keys():
        # pop the tensor for this key (upstream: ``val = orig_state_dict.pop(key)``)
        __lowerCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
        if key.startswith("encoder." ):
            # strip the "encoder." prefix used by the original checkpoint
            __lowerCAmelCase = key.replace("encoder." , "" )
        if "qkv" in key:
            # fused qkv tensors are split into separate query/key/value slices
            __lowerCAmelCase = key.split("." )
            if key.startswith("decoder.blocks" ):
                __lowerCAmelCase = config.decoder_hidden_size
                __lowerCAmelCase = int(key_split[2] )
                __lowerCAmelCase = "decoder.decoder_layers."
                if "weight" in key:
                    # query / key / value slices, in that order
                    __lowerCAmelCase = val[:dim, :]
                    __lowerCAmelCase = val[dim : dim * 2, :]
                    __lowerCAmelCase = val[-dim:, :]
            else:
                __lowerCAmelCase = config.hidden_size
                __lowerCAmelCase = int(key_split[1] )
                __lowerCAmelCase = "videomae.encoder.layer."
                if "weight" in key:
                    __lowerCAmelCase = val[:dim, :]
                    __lowerCAmelCase = val[dim : dim * 2, :]
                    __lowerCAmelCase = val[-dim:, :]
        else:
            # upstream: ``orig_state_dict[rename_key(key)] = val``
            __lowerCAmelCase = val

    return orig_state_dict
def prepare_video():
    """Download the test 'eating spaghetti' clip and return it as a list of frames."""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """Download an original VideoMAE checkpoint, convert it to the HF format,
    verify its outputs against known reference logits, and save/push it.

    Args:
        checkpoint_url: Google Drive URL of the original checkpoint.
        pytorch_dump_folder_path: output directory for the converted model, or None.
        model_name: one of the supported names (selects config and reference logits).
        push_to_hub: whether to push the converted model to the hub.
    """
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        # pretraining checkpoints additionally need the boolean mask positions
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"""Model name not supported. Should be one of {model_names}""")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        # classification heads: compare the first three class logits
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        # pretraining heads: compare a 3x3 corner of the reconstruction logits
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and image processor to {pytorch_dump_folder_path}""")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCamelCase__ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 552
| 1
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__a: Union[str, Any] = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for
    training and eval (legacy GLUE pipeline).

    Using ``HfArgumentParser`` these can be turned into argparse arguments via
    the help metadata on each field.
    """

    # Which GLUE task to train on; normalized to lowercase in __post_init__.
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    # Directory holding the raw .tsv (or similar) data files for the task.
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        # Task names are looked up case-insensitively in glue_processors.
        self.task_name = self.task_name.lower()
class Split(Enum):
    """Dataset split names; looked up by name (``Split[mode]``) when a raw
    string is passed to GlueDataset."""

    train = "train"
    dev = "dev"
    test = "test"
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
    """Deprecated GLUE dataset wrapper (superseded by the 🤗 Datasets library).

    NOTE(review): identifiers in this block look machine-mangled — upstream
    this is ``GlueDataset(Dataset)``, the three ``_lowerCamelCase = 42`` lines
    stand in for the ``args`` / ``output_mode`` / ``features`` attribute
    annotations, every ``_UpperCAmelCase = ...`` assigned a named target, and
    the repeated ``lowerCamelCase`` parameter names are not valid Python.
    Code left byte-identical; verify against the upstream transformers source.
    """

    _lowerCamelCase = 42  # presumably ``args: GlueDataTrainingArguments`` upstream
    _lowerCamelCase = 42  # presumably ``output_mode: str`` upstream
    _lowerCamelCase = 42  # presumably ``features: List[InputFeatures]`` upstream

    def __init__( self : Any , lowerCamelCase : GlueDataTrainingArguments , lowerCamelCase : PreTrainedTokenizerBase , lowerCamelCase : Optional[int] = None , lowerCamelCase : Union[str, Split] = Split.train , lowerCamelCase : Optional[str] = None , ) -> int:
        """Load GLUE features for one split, building and caching them on first use."""
        # This class predates the datasets library; warn every construction.
        warnings.warn(
            """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
            """library. You can have a look at this example script for pointers: """
            """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , lowerCamelCase , )
        _UpperCAmelCase = args
        _UpperCAmelCase = glue_processors[args.task_name]()
        _UpperCAmelCase = glue_output_modes[args.task_name]
        # Accept either a Split member or its string name.
        if isinstance(lowerCamelCase , lowerCamelCase ):
            try:
                _UpperCAmelCase = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        # Load data features from cache or dataset file
        _UpperCAmelCase = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
        _UpperCAmelCase = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            _UpperCAmelCase , _UpperCAmelCase = label_list[2], label_list[1]
        _UpperCAmelCase = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        _UpperCAmelCase = cached_features_file + """.lock"""
        with FileLock(lowerCamelCase ):
            if os.path.exists(lowerCamelCase ) and not args.overwrite_cache:
                # Cache hit: deserialize the previously computed features.
                _UpperCAmelCase = time.time()
                _UpperCAmelCase = torch.load(lowerCamelCase )
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
            else:
                # Cache miss: read raw examples for the requested split and tokenize.
                logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
                if mode == Split.dev:
                    _UpperCAmelCase = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    _UpperCAmelCase = self.processor.get_test_examples(args.data_dir )
                else:
                    _UpperCAmelCase = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    _UpperCAmelCase = examples[:limit_length]
                _UpperCAmelCase = glue_convert_examples_to_features(
                    lowerCamelCase , lowerCamelCase , max_length=args.max_seq_length , label_list=lowerCamelCase , output_mode=self.output_mode , )
                _UpperCAmelCase = time.time()
                torch.save(self.features , lowerCamelCase )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )

    def __len__( self : Union[str, Any] ) -> Any:
        """Number of cached feature rows."""
        return len(self.features )

    def __getitem__( self : str , lowerCamelCase : Any ) -> InputFeatures:
        """Return the i-th InputFeatures row."""
        return self.features[i]

    def lowerCamelCase ( self : int ) -> List[Any]:
        """Return the label list for the configured task."""
        return self.label_list
| 108
|
import os

# Value of each single Roman numeral symbol.
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals):
    """Convert a Roman-numeral string to its integer value.

    Uses the subtractive rule: a symbol that is smaller than its successor is
    subtracted from the running total, otherwise it is added; the final symbol
    is always added.
    """
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num):
    """Return the minimal (canonical) Roman-numeral form of ``num``.

    Handles thousands directly, then hundreds/tens/units each with the 9- and
    4- subtractive special cases before the additive symbols.
    """
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def UpperCamelCase ( snake_case__ = "/p089_roman.txt"):
lowerCAmelCase_ : int = 0
with open(os.path.dirname(snake_case__) + roman_numerals_filename) as filea:
lowerCAmelCase_ : List[Any] = filea.readlines()
for line in lines:
lowerCAmelCase_ : Any = line.strip()
lowerCAmelCase_ : Tuple = parse_roman_numerals(snake_case__)
lowerCAmelCase_ : List[Any] = generate_roman_numerals(snake_case__)
savings += len(snake_case__) - len(snake_case__)
return savings
if __name__ == "__main__":
print(f"{solution() = }")
| 659
| 0
|
def __SCREAMING_SNAKE_CASE(number: int) -> int:
    """Return the (number - 1)-th Catalan number (so 1, 1, 2, 5, 14, ...).

    Uses the integer-safe recurrence C_i = C_{i-1} * (4i - 2) // (i + 1).

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` < 1.
    """
    if not isinstance(number, int):
        # BUG FIX: the second isinstance argument was the value itself
        # (``isinstance(x, x)``), which made every call raise TypeError from
        # isinstance; the intent was clearly an int check.  The parameter is
        # renamed to ``number`` so the error messages below resolve.
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 467
|
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class a(DeformableDetrImageProcessor):
    """Deprecated alias kept for backward compatibility: behaves exactly like
    DeformableDetrImageProcessor but warns on construction.

    NOTE(review): the class name looks machine-mangled; upstream this is
    ``DeformableDetrFeatureExtractor``.
    """

    def __init__(self, *args, **kwargs):
        # Fixed: the original had duplicate ``*A_``/``**A_`` parameter names
        # (a SyntaxError) and a mangled warning category.
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 467
| 1
|
"""Report modified .py files under the desired list of top-level sub-dirs
passed as CLI arguments, e.g.:

    python ./utils/get_modified_files.py utils src tests examples

It uses git to find the forking point and which files were modified — i.e.
files not under git won't be considered. Since the output of this script is
fed into Makefile commands it doesn't print a newline after the results.
"""
import re
import subprocess
import sys


# SHA of the merge-base between main and HEAD: the fork point to diff against.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
# All files modified since the fork point (deleted files filtered out).
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

# Only keep .py files living under one of the requested top-level directories.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is consumed verbatim by Makefile commands.
print(" ".join(relevant_modified_files), end="")
| 394
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCamelCase ( _A ):
    """MobileViT-specific config tester.

    Confirms that an instantiated config exposes the attributes unique to
    MobileViT before the generic config tests run.
    """

    def lowercase__ ( self ):
        """Instantiate the config and assert the MobileViT-specific fields exist."""
        config = self.config_class(**self.inputs_dict )
        # Same three hasattr checks as before, expressed as a loop.
        for required_attribute in ('hidden_sizes', 'neck_hidden_sizes', 'num_attention_heads'):
            self.parent.assertTrue(hasattr(config , required_attribute ) )
class _UpperCamelCase :
    """Builds MobileViT configs and dummy inputs for the model test suite.

    NOTE(review): identifiers in this block look machine-mangled — upstream
    this is ``MobileViTModelTester``; every ``a__ = ...`` line assigned a
    named attribute/local (``self.parent = parent`` etc.), the duplicated
    ``_a`` parameter names in ``__init__`` are not valid Python, and the
    methods all sharing the name ``lowercase__`` would shadow one another.
    Code left byte-identical; verify against the upstream transformers test.
    """

    def __init__( self , _a , _a=13 , _a=32 , _a=2 , _a=3 , _a=640 , _a=4 , _a="silu" , _a=3 , _a=32 , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=10 , _a=None , ):
        """Store the hyper-parameters used to build configs and dummy inputs."""
        a__ = parent
        a__ = batch_size
        a__ = image_size
        a__ = patch_size
        a__ = num_channels
        a__ = last_hidden_size
        a__ = num_attention_heads
        a__ = hidden_act
        a__ = conv_kernel_size
        a__ = output_stride
        a__ = hidden_dropout_prob
        a__ = attention_probs_dropout_prob
        a__ = classifier_dropout_prob
        a__ = use_labels
        a__ = is_training
        a__ = num_labels
        a__ = initializer_range
        a__ = scope

    def lowercase__ ( self ):
        """Create random pixel values (and labels, if enabled) plus a config."""
        a__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        a__ = None
        a__ = None
        if self.use_labels:
            # classification labels and per-pixel segmentation labels
            a__ = ids_tensor([self.batch_size] , self.num_labels )
            a__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )

        a__ = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def lowercase__ ( self ):
        """Build a MobileViTConfig from the stored hyper-parameters."""
        return MobileViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )

    def lowercase__ ( self , _a , _a , _a , _a ):
        """Check MobileViTModel's output shape (feature map reduced by output_stride)."""
        a__ = MobileViTModel(config=_a )
        model.to(_a )
        model.eval()
        a__ = model(_a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def lowercase__ ( self , _a , _a , _a , _a ):
        """Check the image-classification head returns (batch_size, num_labels) logits."""
        a__ = self.num_labels
        a__ = MobileViTForImageClassification(_a )
        model.to(_a )
        model.eval()
        a__ = model(_a , labels=_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowercase__ ( self , _a , _a , _a , _a ):
        """Check semantic-segmentation logits shape, with and without labels."""
        a__ = self.num_labels
        a__ = MobileViTForSemanticSegmentation(_a )
        model.to(_a )
        model.eval()
        a__ = model(_a )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        a__ = model(_a , labels=_a )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def lowercase__ ( self ):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        a__ = self.prepare_config_and_inputs()
        a__ , a__ , a__ , a__ = config_and_inputs
        a__ = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common model tests for MobileViT.

    MobileViT does not use attention, inputs_embeds, or token embeddings, so the
    corresponding common tests are skipped.
    """

    # NOTE(review): the mixin base names, this class name, and the skipped-test
    # method name below were destroyed by the name mangling and are restored from
    # transformers' test-suite conventions — confirm against the file's imports.
    # The original declared duplicate bases (`_A, _A`), which raises TypeError,
    # and named every method `lowercase__`, so unittest discovered no tests.

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO two-cats fixture image used by the integration tests.

    Renamed from the mangled `lowerCAmelCase_` to `prepare_img`, the name the
    integration tests actually call.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released `apple/mobilevit` checkpoints.

    The class was renamed from the mangled `_UpperCamelCase` (which collided with
    the common-test class and shadowed it at module level), and the methods from
    `lowercase__` (which shadowed each other, so unittest ran none of them).
    """

    @cached_property
    def default_image_processor(self):
        # Only build the processor when vision dependencies are installed.
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # Post-processing runs on CPU tensors.
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 394
| 1
|
'''simple docstring'''
from typing import Any
def UpperCAmelCase_(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Viterbi algorithm: return the most likely hidden-state sequence.

    The mangled original declared five parameters all named `__lowercase`
    (a SyntaxError) while the body referenced the real names, and collapsed the
    `probabilities`/`pointers` dict-entry assignments into dead local
    assignments; both are restored here.

    Raises ValueError (via _validation) on malformed input.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate every Viterbi argument, raising ValueError on the first problem.

    Renamed from the mangled `UpperCAmelCase_` to `_validation`, the name the
    Viterbi function actually calls; the duplicate `__lowercase` parameters
    (a SyntaxError) are given back their real names.
    """
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raise ValueError if any of the five Viterbi inputs is empty/falsy."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Check that both sequence arguments are lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def _validate_list(_object: Any, var_name: str) -> None:
    """Raise ValueError unless *_object* is a list whose items are all strings.

    The mangled original declared two parameters both named `__lowercase`
    (a SyntaxError) while the body used `_object`/`var_name`; restored here.
    """
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    for x in _object:
        if not isinstance(x, str):
            raise ValueError(f"{var_name} must be a list of strings")
def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Check the three probability tables: one flat str->float dict plus two nested ones."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Check that *_object* is a dict of str -> dict[str, float]."""
    # Outer mapping must be str -> dict ...
    _validate_dict(_object, var_name, dict)
    # ... and every inner mapping must be str -> float.
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Raise ValueError unless *_object* is a dict with str keys and *value_type* values.

    nested: only affects the wording of the values error message.
    """
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 711
|
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    """Return the class label encoded in a pets-style file name.

    The basename is expected to look like ``<label>_<number>.jpg``
    (e.g. ``beagle_32.jpg`` -> ``beagle``).  Renamed from the mangled
    `UpperCAmelCase_` to `extract_label`, the name the dataset and the
    training loop actually call; the body referenced `fname`, which the
    mangled signature no longer declared.
    """
    # Work on the basename only so directory names never leak into the label.
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    """Minimal image-classification dataset over a list of pet-image file paths.

    Restored from the mangled `class A_(lowerCAmelCase_)`: the class is
    instantiated as `PetsDataset` by the training loop, and the base
    `lowerCAmelCase_` was an undefined name where `torch.utils.data.Dataset`
    (imported at the top of the file) is clearly intended.
    """

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        # file_names: .jpg paths; the label is parsed from each file name.
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    """Fine-tune a timm `resnet50d` classifier on a pets image folder with Accelerate.

    config: dict of hyper-parameters (lr, num_epochs, seed, batch_size, image_size).
    args: parsed CLI namespace from main().

    Restored from a mangled version whose two parameters were both named
    `__lowercase` (a SyntaxError) and whose attribute assignments (e.g.
    `param.requires_grad = ...`) had been collapsed into dead local bindings.
    """
    # Initialize accelerator (with experiment tracking only when requested).
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main() -> None:
    """Parse CLI arguments and launch the training loop.

    Renamed from the mangled `UpperCAmelCase_` to `main`, the name the
    `__main__` guard actually calls.  The mangled version passed the undefined
    name `__lowercase` as `type=`/`default=`/`required=` values; the standard
    `str`/`None`/`True` values are restored.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
# Script entry point.
if __name__ == "__main__":
    main()
| 119
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger; the config class below logs through it.
logger = logging.get_logger(__name__)

# Map of canonical checkpoint name -> hosted config URL.  Both constants were
# mangled to the same name `a_`, so the second shadowed the first and the
# class body's `logger.info(...)` referenced an undefined name.
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class __lowercase(PretrainedConfig):
    """Configuration class for a Deformable DETR model.

    Restored from a mangled version: the `__init__` signature declared every
    parameter as `lowercase__` (duplicate argument names, a SyntaxError), both
    class attributes were named `_A` (so `model_type` was lost to shadowing),
    the base class was the undefined `_UpperCAmelCase` instead of
    `PretrainedConfig`, and `to_dict` had lost its dict-entry assignments.
    """

    model_type = "deformable_detr"
    # Map generic config attribute names onto this model's field names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Rebuild the nested config object from its serialized dict form.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, including the nested backbone config and model_type."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 480
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase taking 1 or 2 steps at a time.

    This is the Fibonacci recurrence: ways(n) = ways(n-1) + ways(n-2).
    The mangled original named the parameter after the function itself (so the
    body's `number_of_steps` was undefined) and checked
    `isinstance(x, x)` instead of `isinstance(x, int)`.
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
# Run the doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 480
| 1
|
"""simple docstring"""
from math import factorial
def solution(lowercase_: int = 100) -> int:
    """Return the sum of the digits of ``lowercase_!`` (Project Euler problem 20).

    Bug fix: the original summed `int(lowercase_)` once per digit — i.e.
    n * number_of_digits(n!) — instead of summing each digit `int(x)`.
    Renamed from the mangled `lowercase__` to `solution`, the name the
    `__main__` guard actually calls.
    """
    return sum(int(x) for x in str(factorial(lowercase_)))
# Script entry point: read n from stdin and print the digit sum of n!.
if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
|
"""simple docstring"""
class __SCREAMING_SNAKE_CASE:
    """Prefix-sum table supporting O(1) range-sum queries over a fixed int array.

    The mangled original gave both query methods the same name
    (`__SCREAMING_SNAKE_CASE`), so the second `def` silently shadowed the
    first and the range-sum query was unreachable; the conventional names
    `get_sum` and `contains_sum` are restored.
    """

    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        # prefix_sum[i] holds array[0] + ... + array[i].
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end], both endpoints inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        # A subarray (i..j] sums to target iff prefix[j] - prefix[i] == target.
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
# Run the doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 51
| 1
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class SCREAMING_SNAKE_CASE__:
        """Stand-in for PIL's Image so references don't fail when Pillow is absent."""

        @staticmethod
        def A(*args, **kwargs):
            # No-op placeholder.  The mangled original declared `*a_, **a_`
            # (duplicate argument names, a SyntaxError); the internal names are
            # free to change, so distinct conventional names are used.
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE = MODEL_FOR_OBJECT_DETECTION_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
    """Build an ObjectDetectionPipeline plus one sample image for the common pipeline tests.

    The mangled original declared three parameters all named `a_` (a
    SyntaxError) and named every method `A`; names are restored to the
    pipeline-test framework's expected hooks.
    """
    object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
    return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def run_pipeline_test(self, object_detector, examples):
    """Exercise the pipeline on a single image and on a mixed batch of inputs."""
    outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)
    self.assertGreater(len(outputs), 0)
    for detected_object in outputs:
        self.assertEqual(
            detected_object,
            {
                "score": ANY(float),
                "label": ANY(str),
                "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
            },
        )

    import datasets

    dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
    # Batch mixing a PIL image, a URL and files with different colour modes.
    batch = [
        Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        # RGBA
        dataset[0]["file"],
        # LA
        dataset[1]["file"],
        # L
        dataset[2]["file"],
    ]
    batch_outputs = object_detector(batch, threshold=0.0)
    self.assertEqual(len(batch), len(batch_outputs))
    for outputs in batch_outputs:
        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )
@require_tf
@unittest.skip("Object detection not implemented in TF")
def test_small_model_tf(self):
    """Placeholder: the TF pipeline does not support object detection."""
    pass
@require_torch
def test_small_model_pt(self):
    """Fast check against a tiny randomly-initialized DETR checkpoint.

    The mangled original referenced `a_` (undefined — the checkpoint id was
    lost) and named the method `A`, shadowing its siblings.
    """
    model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"
    model = AutoModelForObjectDetection.from_pretrained(model_id)
    feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
    object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
    outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
    self.assertEqual(
        nested_simplify(outputs, decimals=4),
        [
            {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
        ],
    )
    outputs = object_detector(
        [
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            "http://images.cocodataset.org/val2017/000000039769.jpg",
        ],
        threshold=0.0,
    )
    self.assertEqual(
        nested_simplify(outputs, decimals=4),
        [
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        ],
    )
@require_torch
@slow
def test_large_model_pt(self):
    """Slow check against the released facebook/detr-resnet-50 checkpoint."""
    model_id = "facebook/detr-resnet-50"
    model = AutoModelForObjectDetection.from_pretrained(model_id)
    feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
    object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
    outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
    self.assertEqual(
        nested_simplify(outputs, decimals=4),
        [
            {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
            {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
            {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
            {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
            {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
        ],
    )
    outputs = object_detector(
        [
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            "http://images.cocodataset.org/val2017/000000039769.jpg",
        ]
    )
    self.assertEqual(
        nested_simplify(outputs, decimals=4),
        [
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        ],
    )
@require_torch
@slow
def test_large_model_pt(self):
    """Run DETR ResNet-50 object detection through `pipeline` on a COCO image,
    single-image and two-image batch, and check the simplified detections.

    NOTE(review): identifier mangling had left this method named `A` (clashing
    with its siblings) and reading locals (`object_detector`, `a_`) that were
    never bound; names restored from the reads.
    """
    model_id = "facebook/detr-resnet-50"
    object_detector = pipeline("object-detection", model=model_id)

    # Single image.
    outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
    self.assertEqual(
        nested_simplify(outputs, decimals=4),
        [
            {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
            {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
            {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
            {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
            {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
        ],
    )

    # Batch of two identical images -> two identical result lists.
    outputs = object_detector(
        [
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            "http://images.cocodataset.org/val2017/000000039769.jpg",
        ]
    )
    self.assertEqual(
        nested_simplify(outputs, decimals=4),
        [
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        ],
    )
@require_torch
@slow
def test_threshold(self):
    """With a high `threshold`, only the two high-confidence cat detections survive.

    NOTE(review): restored mangled locals (`threshold`, `model_id`,
    `object_detector`, `outputs`) and gave the method a distinct name.
    """
    threshold = 0.9985
    model_id = "facebook/detr-resnet-50"
    object_detector = pipeline("object-detection", model=model_id)

    outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
    self.assertEqual(
        nested_simplify(outputs, decimals=4),
        [
            {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
            {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
        ],
    )
@require_torch
@require_pytesseract
@slow
def test_layoutlm(self):
    """Object detection with a LayoutLMv3 document model (needs pytesseract OCR).

    NOTE(review): restored mangled locals and gave the method a distinct name.
    """
    model_id = "Narsil/layoutlmv3-finetuned-funsd"
    threshold = 0.9993
    object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

    outputs = object_detector(
        "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
    )
    self.assertEqual(
        nested_simplify(outputs, decimals=4),
        [
            {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
        ],
    )
| 69
|
from typing import Dict
from .base import GenericTensor, Pipeline
class __snake_case(SCREAMING_SNAKE_CASE):
    """Feature-extraction pipeline: tokenize text, run the model, and return its
    first output tensor (logits or last_hidden_state).

    NOTE(review): identifier mangling had collapsed all four hook methods onto a
    single name (so only the last survived) and left locals like
    `preprocess_params` unbound; the standard `Pipeline` hook names and the
    locals are restored from the reads.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        """Split caller kwargs into (preprocess, forward, postprocess) params."""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)'
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        """Tokenize `inputs` into framework tensors ("pt"/"tf")."""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the underlying model on the tokenized inputs."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        """Delegate to the base Pipeline call machinery."""
        return super().__call__(*args, **kwargs)
| 193
| 0
|
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _a(__a):
    """Unit tests for `PNDMScheduler` (save/load round-trips, step shapes, config
    sweeps, and full denoising loops).

    NOTE(review): identifier mangling had given every attribute/method the same
    name (shadowing all but the last, and hiding the `test_*` methods from
    unittest discovery), duplicated parameter names (a SyntaxError), and left
    locals like `config`, `kwargs`, `scheduler` unbound. Names restored from the
    reads and the upstream diffusers test suite.
    """

    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Default PNDM config, overridable per test via **kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Scheduler state must survive save_config/from_pretrained for any config."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by check_over_configs / check_over_forward for this scheduler.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Same save/load round-trip check, but with the default config."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run a complete PRK + PLMS denoising loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        # step_plms needs past residuals (PRK warm-up) and must refuse to run without them.
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 603
|
'''simple docstring'''
def equation(x: float) -> float:
    """The function whose root we search for: f(x) = 10 - x^2."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Find a root of `equation` inside [a, b] by bisection, to within 0.01.

    Raises:
        ValueError: if f(a) and f(b) have the same sign (Bolzano's theorem gives
            no guarantee of a root in that case).

    NOTE(review): the mangled original had duplicate parameter names (a
    SyntaxError) and had collapsed the a/b/c updates onto one placeholder
    variable; the classic bisection update (shrink toward the sign change) is
    restored.
    """
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps: keep the half where the sign changes.
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
| 603
| 1
|
def power(base: int, exponent: int) -> float:
    """Return base**exponent (exponent >= 0) via tail recursion.

    NOTE(review): the mangled original declared two parameters with the same
    name (a SyntaxError) while its body and callers read `power`, `base` and
    `exponent`; those names are restored.
    """
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f'''{base} to the power of {exponent} is {result}''')
| 45
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)


class lowerCAmelCase_(lowercase):
    """Deprecated alias of `MobileViTImageProcessor`; emits a FutureWarning on use.

    NOTE(review): the mangled original declared `*args` and `**kwargs` under the
    same name (a SyntaxError) and passed that argument as the warning category;
    restored to the standard deprecation-shim shape.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 45
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): the mangling had bound both the logger and this map to one name,
# clobbering the logger; distinct names restored.
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class _a(lowerCAmelCase__):
    """Configuration class for ViT-MSN models (`model_type = "vit_msn"`).

    NOTE(review): the mangled `__init__` assigned every argument to one local
    instead of the instance attributes the model reads; `self.*` assignments
    restored from the parameter names.
    """

    model_type = """vit_msn"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 712
|
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
# NOTE(review): identifier mangling collapsed every assignment in this script
# onto the single name `UpperCamelCase`, so each statement clobbers the previous
# one, and the per-model keys under which the original `results` dict stored the
# expected tensors below are no longer recoverable from this file. The loop at
# the bottom reads `api`, `models`, `local_checkpoint`, `model`, `noise`,
# `time_step`, `logits` and `results`, none of which are bound anymore. Code is
# kept byte-identical; restore the names from upstream history before running.
UpperCamelCase = HfApi()
UpperCamelCase = {}
# fmt: off
# Expected first-30 output logits per checkpoint (keys lost, see note above).
UpperCamelCase = torch.tensor([
    -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
    1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
    -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
    0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
UpperCamelCase = torch.tensor([
    -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
    1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
    -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
    2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
UpperCamelCase = torch.tensor([
    -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
    -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
    -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
    0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
UpperCamelCase = torch.tensor([
    0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
    -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
    0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
    -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
UpperCamelCase = torch.tensor([
    0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
    -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
    0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
    -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
UpperCamelCase = torch.tensor([
    0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
    -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
    0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
    -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
UpperCamelCase = torch.tensor([
    0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
    -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
    0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
    -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
UpperCamelCase = torch.tensor([
    0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
    -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
    0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
    -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
UpperCamelCase = torch.tensor([
    -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
    1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
    -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
    1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
UpperCamelCase = torch.tensor([
    -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
    0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
    -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
    1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
UpperCamelCase = torch.tensor([
    -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
    0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
    -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
    1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
UpperCamelCase = torch.tensor([
    -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
    1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
    -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
    3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
UpperCamelCase = torch.tensor([
    -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
    1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
    -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
    2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
UpperCamelCase = torch.tensor([
    -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
    1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
    -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
    3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
UpperCamelCase = torch.tensor([
    -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
    1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
    -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
    1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
# For every google/CompVis diffusers checkpoint on the Hub: load it, run one
# deterministic denoising step, and compare the first 30 logits to the table above.
UpperCamelCase = api.list_models(filter='diffusers')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        UpperCamelCase = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]  # hard-coded local path — TODO parameterize
        print(F'''Started running {mod.modelId}!!!''')
        if mod.modelId.startswith('CompVis'):
            UpperCamelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
        else:
            UpperCamelCase = UNetaDModel.from_pretrained(local_checkpoint)
        # Fixed seeds so the generated noise (and hence the logits) is reproducible.
        torch.manual_seed(0)
        random.seed(0)
        UpperCamelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        UpperCamelCase = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            UpperCamelCase = model(noise, time_step).sample
        # "org/model-name" -> "org_model_name", the key format the results dict used.
        assert torch.allclose(
            logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3
        )
        print(F'''{mod.modelId} has passed successfully!!!''')
| 387
| 0
|
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return all primes below `limit` (limit >= 3) via a sieve of Eratosthenes.

    NOTE(review): the mangled original read `is_prime`, `index` and `primes`
    without ever binding them; names restored from the reads.

    >>> prime_sieve(10)
    [2, 3, 5, 7]
    """
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    # Even numbers > 2 are never prime, so only odd candidates are collected.
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: the prime below `ceiling` expressible as the sum of the
    most consecutive primes.

    >>> solution(100)
    41
    """
    primes = prime_sieve(ceiling)
    prime_set = set(primes)  # O(1) membership instead of scanning the list
    length = 0
    largest = 0

    for i in range(len(primes)):
        # Start at i + length: shorter runs can never beat the current best.
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(F"{solution() = }")
| 331
|
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCamelCase_ : List[Any] = logging.get_logger(__name__)
class lowerCamelCase__(__lowerCamelCase):
    """Deprecated SageMaker trainer shim; behaves exactly like `Trainer` but
    warns on construction.

    NOTE(review): the mangled original declared `args` and `**kwargs` under the
    same name (a SyntaxError) and passed that argument as the warning category;
    restored to the standard deprecation-shim shape.
    """

    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 331
| 1
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
A__ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    """Shared fixture holder for the Pix2Struct image-processor tests below.

    NOTE(review): restored the class name read by the test classes at their
    `PixaStructImageProcessingTester(self, ...)` call sites, the `self.*`
    attribute assignments (the mangled original bound everything to one local),
    and distinct method names.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {'''height''': 20, '''width''': 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}

    def prepare_image_processor_dict(self):
        """Kwargs used to build the image processor under test."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        """Download a fixed sample image (network access required)."""
        img_url = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('''RGB''')
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.',
)
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `PixaStructImageProcessor` on 3-channel inputs (PIL, numpy, torch).

    NOTE(review): mangling had named every method `snake_case` (shadowing all
    but the last and hiding them from unittest discovery) and bound locals like
    `image_processor` / `encoded_images` to one placeholder; names restored from
    the reads. The boolean/class arguments the mangling erased
    (`equal_resolution=False`, `numpify=True`, `torchify=True`,
    `assertRaises(ValueError)`, `is_vqa`) follow the upstream test — confirm.
    """

    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, '''do_normalize'''))
        self.assertTrue(hasattr(image_processor, '''do_convert_rgb'''))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors='''pt''', max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # patch pixels * channels, plus 2 positional features per patch
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # In VQA mode a header_text is required, so the bare call must raise.
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors='''pt''', max_patches=max_patch
                ).flattened_patches

            dummy_text = '''Hello'''
            encoded_images = image_processor(
                image_inputs[0], return_tensors='''pt''', max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors='''pt''', max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.',
)
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `PixaStructImageProcessor` with 4-channel (RGBA) inputs, which
    the processor converts down to 3 RGB channels.

    NOTE(review): class/method/local names restored from the reads (the mangled
    original clashed with the sibling test classes); `equal_resolution=False`
    and the `expected_encoded_image_num_channels` attribute follow the upstream
    test — confirm.
    """

    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, '''do_normalize'''))
        self.assertTrue(hasattr(image_processor, '''do_convert_rgb'''))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # RGBA is converted to RGB before patching, hence num_channels - 1.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
| 717
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure: submodule name -> list of public names it exports.
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # NOTE(review): these module/class names look mangled ("...lmva"); they are
    # only evaluated by static type checkers, never at runtime — confirm upstream.
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers on-demand imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 49
| 0
|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class A_(BaseOutput):
    """Output of the semantic Stable Diffusion pipeline.

    NOTE(review): both fields carry the same (mangled) name, so the second
    annotation overwrites the first in `__annotations__`; upstream these are
    presumably `images` and `nsfw_content_detected` — confirm before relying
    on the field names.
    """

    _SCREAMING_SNAKE_CASE: Union[List[PIL.Image.Image], np.ndarray]
    _SCREAMING_SNAKE_CASE: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 485
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')

# Config classes whose docstrings legitimately lack a checkpoint link.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    'DecisionTransformerConfig',
    'EncoderDecoderConfig',
    'MusicgenConfig',
    'RagConfig',
    'SpeechEncoderDecoderConfig',
    'TimmBackboneConfig',
    'VisionEncoderDecoderConfig',
    'VisionTextDualEncoderConfig',
    'LlamaConfig',
}
def _lowerCamelCase(__A):
    """Return the checkpoint name advertised in the docstring of config class `__A`, or None.

    Scans the class source for markdown links of the form
    `[name](https://huggingface.co/name)` and returns the first `name`
    whose link target matches it exactly.
    """
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(__A)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def _lowerCamelCase():
    """Collect config classes whose docstring lacks a valid checkpoint link; raise if any."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        # NOTE(review): `get_checkpoint_from_config_class` is expected to be the
        # extractor defined above (its binding name is mangled in this file).
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
    # The zero-argument checker defined above is (mangled) `_lowerCamelCase`;
    # the original call referenced an undefined name.
    _lowerCamelCase()
| 485
| 1
|
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Fixed mel-spectrogram chunk length processed per segment.
TARGET_FEATURE_LENGTH = 256
class __UpperCAmelCase(DiffusionPipeline):
    """Spectrogram-diffusion pipeline: note tokens -> mel spectrogram -> (optional) audio via MelGAN."""

    # The MelGAN vocoder is optional; without it only "mel" output is possible.
    _optional_components = ["melgan"]

    def __init__(self, notes_encoder, continuous_encoder, decoder, scheduler, melgan):
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features from [min_value, max_value] into `output_range`."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert `scale_features`: map network outputs back to the feature range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        """Run both encoders; token positions > 0 form the notes mask."""
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        """One denoiser forward pass at diffusion time `noise_time`."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ):
        """Generate audio (or a mel spectrogram) from a list of note-token chunks."""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info('''Generated segment''', i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.'''
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.'''
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
| 700
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq -> HF parameter-name mapping; "*" is later replaced by the layer index.
MAPPING = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
    '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
    '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
    '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
    '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
    '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
    '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
    '''self_attn.rotary_emb''': '''encoder.embed_positions''',
    '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
    '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
    '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
    '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
    '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
    '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
    '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
    '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
    '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
    '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
    '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
    '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}
# Keys copied at the top level of the HF model (no "wav2vec2_conformer." prefix).
TOP_LEVEL_KEYS = [
    '''lm_head''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
]
def snake_case_(key, value, full_name, weight_type, hf_model):
    """Copy tensor `value` into the `hf_model` parameter addressed by dotted `key`.

    `weight_type` selects which sub-attribute of the resolved module receives the
    data (e.g. "weight", "bias"); None writes directly into the resolved tensor.
    Raises ValueError on a shape mismatch; `full_name` is only used for messages.
    """
    hf_pointer = hf_model
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    # Write into the matching parameter; the mangled original assigned every
    # branch to a throwaway local, so nothing reached the model.
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def snake_case_(fairseq_model, hf_model, is_headless):
    """Map every fairseq state-dict entry onto the HF conformer model.

    Conv feature-extractor tensors go through `load_conv_layer`; everything else
    is renamed via MAPPING and copied with `set_recursively`. Unmatched weights
    are collected and logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wavaveca_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            # NOTE(review): `load_conv_layer` / `set_recursively` / MAPPING /
            # TOP_LEVEL_KEYS / `logger` are defined above (their binding names
            # are mangled in this file); verify they resolve at runtime.
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(mapped_key, value, name, weight_type, hf_model)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def snake_case_(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq `conv_layers.*` tensor into the HF feature extractor.

    `type_id` 0 targets the conv weight/bias; `type_id` 2 targets the layer norm
    (for all layers, or only layer 0 when group norm is used). Anything else is
    recorded in `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def snake_case_(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq wav2vec2-conformer checkpoint to the HF format.

    Builds a CTC model (plus tokenizer/feature-extractor/processor from
    `dict_path`) when fine-tuned, otherwise a pretraining model, copies the
    fairseq weights over, and saves everything to `pytorch_dump_folder_path`.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        # Checkpoints trained with rotary position embeddings.
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    # NOTE(review): `recursively_load_weights` is defined above under a mangled
    # name; verify the binding resolves at runtime.
    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    args = parser.parse_args()
    # The conversion entry point defined above is (mangled) `snake_case_`;
    # the original call referenced an undefined name.
    snake_case_(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 606
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class lowercase(TaskTemplate):
    """Task template describing the text-classification input/label schema."""

    # `task` keeps its default in asdict() output so consumers can dispatch on it.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel."""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: mutate via __dict__ to bypass the frozen __setattr__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self):
        """Map dataset column names to the template's canonical column roles."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 352
|
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class lowercase(PretrainedConfig):
    """Configuration for XLM-ProphetNet encoder-decoder models."""

    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.'
        )
| 352
| 1
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
    },
    'merges_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
    },
    'tokenizer_config_file': {
        'facebook/blenderbot_small-90M': (
            'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot_small-90M': 512}
def lowerCamelCase(_snake_case):
    """Return the set of adjacent symbol bigrams in the word `_snake_case`.

    `_snake_case` is a word represented as a sequence of symbols (a string or a
    tuple of string symbols). Words shorter than two symbols yield an empty set.
    """
    pairs = set()
    if not _snake_case:
        # Edge case: the original indexed word[0] and crashed on an empty word.
        return pairs
    prev_char = _snake_case[0]
    for char in _snake_case[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class a ( lowercase ):
UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : str = ["""input_ids""", """attention_mask"""]
def __init__(
    self,
    vocab_file,
    merges_file,
    bos_token="__start__",
    eos_token="__end__",
    unk_token="__unk__",
    pad_token="__null__",
    **kwargs,
):
    """Build the tokenizer from a BPE vocab (JSON) / merges (text) file pair."""
    super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

    with open(vocab_file, encoding="utf-8") as vocab_handle:
        self.encoder = json.load(vocab_handle)
    self.decoder = {v: k for k, v in self.encoder.items()}
    with open(merges_file, encoding="utf-8") as merges_handle:
        # First line of a merges file is a version header; last entry is empty.
        merges = merges_handle.read().split("\n")[1:-1]
    merges = [tuple(merge.split()) for merge in merges]
    # Lower rank = earlier (more frequent) merge.
    self.bpe_ranks = dict(zip(merges, range(len(merges))))
    self.cache = {}
@property
def __snake_case(self):
    """Return the size of the base vocabulary (excluding added tokens).

    NOTE(review): several methods in this class share the mangled name
    `__snake_case`, so later definitions overwrite this property.
    """
    return len(self.encoder)
def __snake_case(self):
    """Return the full vocabulary (base encoder plus added tokens) as a dict."""
    return dict(self.encoder, **self.added_tokens_encoder)
def __snake_case ( self , UpperCamelCase_ ):
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ : int = re.sub('([.,!?()])' , R' \1' , UpperCamelCase_ )
UpperCAmelCase__ : Any = re.sub('(\')' , R' \1 ' , UpperCamelCase_ )
UpperCAmelCase__ : Dict = re.sub(R'\s{2,}' , ' ' , UpperCamelCase_ )
if "\n" in token:
UpperCAmelCase__ : List[str] = token.replace('\n' , ' __newln__' )
UpperCAmelCase__ : Tuple = token.split(' ' )
UpperCAmelCase__ : Optional[int] = []
for token in tokens:
if not len(UpperCamelCase_ ):
continue
UpperCAmelCase__ : Optional[Any] = token.lower()
UpperCAmelCase__ : Dict = tuple(UpperCamelCase_ )
UpperCAmelCase__ : List[str] = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
UpperCAmelCase__ : str = get_pairs(UpperCamelCase_ )
if not pairs:
words.append(UpperCamelCase_ )
continue
while True:
UpperCAmelCase__ : int = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = bigram
UpperCAmelCase__ : str = []
UpperCAmelCase__ : int = 0
while i < len(UpperCamelCase_ ):
try:
UpperCAmelCase__ : List[str] = word.index(UpperCamelCase_ , UpperCamelCase_ )
new_word.extend(word[i:j] )
UpperCAmelCase__ : List[Any] = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase__ : Union[str, Any] = tuple(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
UpperCAmelCase__ : Dict = get_pairs(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = '@@ '.join(UpperCamelCase_ )
UpperCAmelCase__ : int = word[:-4]
UpperCAmelCase__ : str = word
words.append(UpperCamelCase_ )
return " ".join(UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : str = re.findall(R'\S+\n?' , UpperCamelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(UpperCamelCase_ ).split(' ' ) ) )
return split_tokens
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : Optional[int] = token.lower()
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def __snake_case ( self , UpperCamelCase_ ):
return self.decoder.get(UpperCamelCase_ , self.unk_token )
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : Optional[int] = ' '.join(UpperCamelCase_ ).replace('@@ ' , '' ).strip()
return out_string
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase__ : Union[str, Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase__ : int = os.path.join(
UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '\n' )
UpperCAmelCase__ : Tuple = 0
with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
UpperCAmelCase__ : Dict = token_index
writer.write(' '.join(UpperCamelCase_ ) + '\n' )
index += 1
return vocab_file, merge_file
| 254
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowerCamelCase ( _snake_case ):
    """Argparse callback factory: build the convert command from parsed args.

    The body previously read an undefined name ``args`` while the parameter
    was called ``_snake_case`` (NameError on every invocation); the declared
    parameter is now used.

    NOTE(review): ``ConvertCommand`` must be the converter class defined in
    this module — confirm the class binding matches this name.
    """
    return ConvertCommand(
        _snake_case.model_type,
        _snake_case.tf_checkpoint,
        _snake_case.pytorch_dump_output,
        _snake_case.config,
        _snake_case.finetuning_task_name,
    )
# Error text raised by the convert command when the required TensorFlow
# dependency is not installed (see the ImportError handlers below).
UpperCamelCase__ = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class ConvertCommand(BaseTransformersCLICommand):
    """``transformers-cli convert``: convert an original (mostly TensorFlow)
    checkpoint into a Transformers PyTorch checkpoint.

    Renamed from the generated ``a`` so the factory above (which instantiates
    ``ConvertCommand``) resolves, and rebased on the imported
    ``BaseTransformersCLICommand`` instead of the undefined ``lowercase``.
    """

    @staticmethod
    def register_subcommand(parser):
        """Attach the ``convert`` sub-command to the CLI sub-parser factory."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        # The module-level factory builds a ConvertCommand from parsed args.
        train_parser.set_defaults(func=lowerCamelCase)

    def __init__(self, model_type, tf_checkpoint, pytorch_dump_output, config, finetuning_task_name, *args):
        """Store CLI arguments; the original declared five parameters under
        one duplicated name (SyntaxError) and never assigned the attributes
        that ``run`` reads."""
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        """Dispatch to the per-architecture conversion script.

        Raises:
            ImportError: when the needed framework is not installed.
            ValueError: for an unsupported ``--model_type``.
        """
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                # Module-level TensorFlow-missing message defined above.
                raise ImportError(UpperCamelCase__)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(UpperCamelCase__)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(UpperCamelCase__)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(UpperCamelCase__)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(UpperCamelCase__)
            # A ".ckpt" path is a TF checkpoint; anything else is treated as
            # a dataset file.
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
                    convert_gpta_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(UpperCamelCase__)
            convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(UpperCamelCase__)
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
| 254
| 1
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class lowercase__ ( unittest.TestCase ):
    """Slow integration tests for the Tatoeba -> PyTorch model converter.

    Fixes: the skip decorator referenced an undefined name instead of the
    imported ``DEFAULT_REPO``; the converter property is now named
    ``resolver`` (the test bodies read ``self.resolver``); the three methods
    previously shared one name, so only the last was discoverable.
    """

    @cached_property
    def resolver(self):
        """Converter writing into a throwaway temp directory."""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(['''heb-eng'''])

    @slow
    def test_model_card(self):
        mmeta = self.resolver.write_model_card('''opus-mt-he-en''', dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 253
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    """Config tester for CvT.

    Renamed so the reference in the test class below resolves, and rebased on
    the imported ``ConfigTester`` (the previous base name was undefined).
    """

    def create_and_test_config_common_properties(self):
        # CvT configs expose per-stage ``embed_dim`` / ``num_heads`` lists
        # instead of the usual hidden_size / num_attention_heads.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    """Builds tiny CvT configs plus random inputs and checks TF model shapes.

    Renamed so the ``TFCvtModelTester(self)`` reference in the test class
    resolves; the original ``__init__`` declared every parameter under one
    duplicated name (SyntaxError) and the methods collided on a single name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        # NOTE(review): list defaults are kept for parity with the original
        # defaults; they are only read here, never mutated.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Tiny CvtConfig mirroring the tester's hyper-parameters."""
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward the base model and verify the final feature-map shape."""
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Each stage shrinks the spatial dims by its conv-embedding stride.
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Forward the classification head and verify the logits shape."""
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _a(TFCvtModelTester if False else TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for TFCvt models.

    Fixes: the base classes were undefined names (the mixins are imported at
    the top of the file); the five boolean class attributes and all methods
    previously collided under single names, so only the last of each
    survived; ``config_class`` was an undefined name instead of the imported
    ``CvtConfig``.
    """

    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        # Restore the default policy so later tests are unaffected.
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the TFCvt integration test.

    Renamed: the integration test calls ``prepare_img()`` but the function
    was defined under a generated name (NameError).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


# Backward-compatible alias for the previous (generated) name.
lowerCamelCase_ = prepare_img
@require_tf
@require_vision
class _a(unittest.TestCase):
    """Integration test running the pretrained TFCvt classifier on a fixture.

    Fixes: the cached property is now ``default_image_processor`` (the test
    body reads ``self.default_image_processor``; previously both members
    shared one generated name, so the property was clobbered).
    """

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.92_85, 0.90_15, -0.31_50])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 520
| 0
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    """Builds tiny ViTMSN configs plus random inputs and checks model shapes.

    Renamed so the ``ViTMSNModelTester(self)`` reference in the test class
    resolves; the original ``__init__`` declared every parameter under one
    duplicated name (SyntaxError) and the methods collided on a single name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Tiny ViTMSNConfig mirroring the tester's hyper-parameters."""
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward the base model and verify the hidden-state shape."""
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Forward the classification head (RGB, then greyscale) and verify shapes."""
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        # f-strings: the originals printed the literal braces unformatted.
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for ViTMSN models.

    Fixes: the base classes were undefined names (the mixins are imported at
    the top of the file); the four boolean class attributes and all methods
    collided under single names; ``config_class``/``has_text_modality`` were
    undefined names; renamed so the class no longer shadows (or is shadowed
    by) the integration test class below.
    """

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViTMSN does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the ViTMSN integration test.

    Renamed: the integration test calls ``prepare_img()`` but the function
    was defined under a generated name (NameError).
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    """Integration test running the pretrained ViTMSN classifier on a fixture.

    Fixes: the cached property is now ``default_image_processor`` (both
    members previously shared one generated name); device moves used an
    undefined name instead of the imported ``torch_device``.
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 143
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Public lazy-import API of the module, keyed by sub-module name.
# NOTE(review): previously the dict was bound to a throwaway name and then
# clobbered by the model-class list, while ``_LazyModule`` below received an
# undefined ``_import_structure`` (NameError at import time).
_import_structure = {
    '''configuration_time_series_transformer''': [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TimeSeriesTransformerConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The model classes are only importable when torch is present; extend
    # (not replace) the import structure.
    _import_structure['''modeling_time_series_transformer'''] = [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimeSeriesTransformerForPrediction''',
        '''TimeSeriesTransformerModel''',
        '''TimeSeriesTransformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 143
| 1
|
'''simple docstring'''
from __future__ import annotations
class snake_case :
"""simple docstring"""
def __init__( self, _lowercase ) -> Dict:
SCREAMING_SNAKE_CASE_ = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.' )
if len(_lowercase ) != 0:
SCREAMING_SNAKE_CASE_ = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(_lowercase ) != cols:
raise error
for value in row:
if not isinstance(_lowercase, (int, float) ):
raise error
SCREAMING_SNAKE_CASE_ = rows
else:
SCREAMING_SNAKE_CASE_ = []
def a__ ( self ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def a__ ( self ) -> int:
return len(self.rows )
@property
def a__ ( self ) -> int:
return len(self.rows[0] )
@property
def a__ ( self ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def a__ ( self ) -> bool:
return self.order[0] == self.order[1]
def a__ ( self ) -> Matrix:
SCREAMING_SNAKE_CASE_ = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(_lowercase )
def a__ ( self ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def a__ ( self ) -> bool:
return bool(self.determinant() )
def a__ ( self, _lowercase, _lowercase ) -> int:
SCREAMING_SNAKE_CASE_ = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(_lowercase ).determinant()
def a__ ( self, _lowercase, _lowercase ) -> int:
if (row + column) % 2 == 0:
return self.get_minor(_lowercase, _lowercase )
return -1 * self.get_minor(_lowercase, _lowercase )
def a__ ( self ) -> Matrix:
return Matrix(
[
[self.get_minor(_lowercase, _lowercase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def a__ ( self ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def a__ ( self ) -> Matrix:
SCREAMING_SNAKE_CASE_ = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(_lowercase )
def a__ ( self ) -> Matrix:
SCREAMING_SNAKE_CASE_ = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
return str(self.rows )
def __str__( self ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(_lowercase ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
def a__ ( self, _lowercase, _lowercase = None ) -> None:
SCREAMING_SNAKE_CASE_ = TypeError('Row must be a list containing all ints and/or floats' )
if not isinstance(_lowercase, _lowercase ):
raise type_error
for value in row:
if not isinstance(_lowercase, (int, float) ):
raise type_error
if len(_lowercase ) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix' )
if position is None:
self.rows.append(_lowercase )
else:
SCREAMING_SNAKE_CASE_ = self.rows[0:position] + [row] + self.rows[position:]
def a__ ( self, _lowercase, _lowercase = None ) -> None:
SCREAMING_SNAKE_CASE_ = TypeError(
'Column must be a list containing all ints and/or floats' )
if not isinstance(_lowercase, _lowercase ):
raise type_error
for value in column:
if not isinstance(_lowercase, (int, float) ):
raise type_error
if len(_lowercase ) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix' )
if position is None:
SCREAMING_SNAKE_CASE_ = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
SCREAMING_SNAKE_CASE_ = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self, _lowercase ) -> bool:
if not isinstance(_lowercase, _lowercase ):
return NotImplemented
return self.rows == other.rows
def __ne__( self, _lowercase ) -> bool:
return not self == other
def __neg__( self ) -> Matrix:
return self * -1
def __add__( self, _lowercase ) -> Matrix:
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self, _lowercase ) -> Matrix:
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self, _lowercase ) -> Matrix:
if isinstance(_lowercase, (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(_lowercase, _lowercase ):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second' )
return Matrix(
[
[Matrix.dot_product(_lowercase, _lowercase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix' )
def __pow__( self, _lowercase ) -> Matrix:
if not isinstance(_lowercase, _lowercase ):
raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
SCREAMING_SNAKE_CASE_ = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def a__ ( cls, _lowercase, _lowercase ) -> int:
return sum(row[i] * column[i] for i in range(len(_lowercase ) ) )
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 294
|
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """Generate and write a README.md model card for one allenai wmt16 FSMT checkpoint.

    Args:
        model_card_dir: Path-like directory the card is written into (created if missing).
        src_lang: source language code (e.g. "en").
        tgt_lang: target language code (e.g. "de").
        model_name: allenai wmt16 checkpoint name, used to look up its BLEU scores.
    """
    # Sample sentences shown in the usage snippet of the card.
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
    year={{2020}},
    eprint={{2006.10369}},
    archivePrefix={{arXiv}},
    primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

# Generate one card per ported allenai wmt16 checkpoint.
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 173
| 0
|
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def __lowercase(sentence: str) -> str:
    """Return `sentence` with its first character upper-cased (non-letters left as-is)."""
    if not sentence:
        return ""
    # Map each lowercase ASCII letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 497
|
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __lowercase(text: str) -> None:
    """Print the first-order and second-order Shannon entropies of `text`, and their difference."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f'{round(-1 * my_fir_sum ):.1f}')

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f'{round(-1 * my_sec_sum ):.1f}')

    # print the difference between them
    print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}')
def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and adjacent character pairs in `text`.

    Returns:
        (single_char_counts, two_char_counts). The last character and the
        leading " " + first-char pair are counted explicitly so every position
        of the text is covered exactly once.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    """Entry point: run the module's doctests."""
    import doctest

    doctest.testmod()
    # Example usage of the entropy printer:
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)
# Invoke the CLI entry point when run as a script.
if __name__ == "__main__":
    main()
| 497
| 1
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    """Convert a TensorFlow MobileBERT checkpoint into a saved PyTorch state dict.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        mobilebert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: destination file for the converted PyTorch weights.
    """
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Command-line driver for the TF -> PyTorch MobileBERT conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 117
|
from __future__ import annotations
# A path is a list of (y, x) grid coordinates.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    """A search node: a grid position, its path cost, a parent link, and a heuristic value."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Positions are stored (y, x) to match grid indexing.
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        # Ordering used to pick the most promising open node first.
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    """Greedy best-first search over the module-level `grid` using `delta` moves."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        # Node takes (x, y, goal_x, goal_y, ...), while start/goal are (y, x).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> Path | None:
        """Expand the best open node until the target is reached.

        Returns the retraced path on success, or [start] when the search exhausts
        the open list without reaching the target (mirrors the original contract).
        """
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the walkable, in-bounds neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent links back from `node` and return the path start-to-end."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    # Demo: search from the top-left to the bottom-right corner and print the
    # grid before and after, marking the found path with 2s.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
| 117
| 1
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    """Deprecated alias kept for backward compatibility; use PoolFormerImageProcessor instead."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit the standard deprecation warning, then defer entirely to the new class.
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 702
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
# Cache location and canonical file names used throughout diffusers.
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 143
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)  # module-level logger; used by __call__ below
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """Speech2Text feature extractor.

    Extracts Kaldi-compliant log-mel filter-bank features from raw waveforms via
    ``torchaudio.compliance.kaldi.fbank`` and optionally applies utterance-level
    cepstral mean/variance normalization (CMVN).
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """Compute log-mel filter-bank features for a single waveform."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        """Apply cepstral mean/variance normalization over the first `input_length` frames."""
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            # Reset padded frames so padding stays at `padding_value` after normalization.
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        """Apply utterance-level CMVN to each feature array (lengths from the mask if given)."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ):
        """Featurize one or more raw waveforms and pad them into a BatchFeature."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 103
|
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
# Scripts under examples/by_feature that must not be diffed/launched by these tests.
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]
class _A(unittest.TestCase):
    """Checks that each `examples/by_feature` script only differs from the complete
    example in the expected, feature-specific lines."""

    def one_complete_example(
        self, complete_file_name, parser_only, secondary_filename=None, special_strings=None
    ):
        """Diff every by_feature script against `complete_file_name` and assert no residue.

        Args:
            complete_file_name: complete example script to compare against.
            parser_only: compare `main()` bodies when True, else `training_function()`.
            secondary_filename: optional second baseline script.
            special_strings: substrings stripped from the diff before asserting.
        """
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        # Lines that legitimately differ between the cv example and the features.
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class _A(TempDirTestCase):
    """Runs each `examples/by_feature` script end-to-end through `accelerate launch`."""

    clean_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Shared accelerate config written once for every test in the class.
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}\n '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}\n '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            # With multiple processes the resumed epoch 0 output is not echoed.
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 359
| 0
|
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowercase_: Dict = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    """Deprecated alias kept for backward compatibility; use DonutImageProcessor instead."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit the standard deprecation warning, then defer entirely to the new class.
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 127
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

# pandas.read_csv parameter groups handled specially by CsvConfig.pd_read_csv_kwargs.
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV; mirrors the keyword arguments of `pandas.read_csv`."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        # `delimiter`/`column_names` are aliases for pandas' `sep`/`names`.
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        """Build the kwargs dict passed to `pandas.read_csv`, dropping unsupported keys."""
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    """Packaged CSV dataset builder: parses CSV files with pandas and yields Arrow tables."""

    # datasets' builder machinery reads this class attribute to build the config object.
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        """Return dataset metadata; features come straight from the user config."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Map `config.data_files` to split generators.

        A plain str/list/tuple of files becomes a single TRAIN split; a mapping
        produces one split per key.
        """
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Entries may be directories: iter_files expands them lazily.
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        """Cast a raw Arrow table to the schema declared in `config.features`, if any."""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast: select/reorder columns under the target schema
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield ((file_idx, batch_idx), Arrow table) pairs, reading each CSV in chunks."""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # iterator=True makes read_csv return a chunked TextFileReader (honors chunksize).
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
| 127
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical config URLs for released LXMERT checkpoints.
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    """Configuration for LXMERT models.

    Holds the hyperparameters of the language, vision and cross-modality
    encoders plus the pre-training task switches. Every argument is stored as
    an instance attribute so `save_pretrained`/`from_pretrained` round-trip it.
    """

    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        # Text-encoder hyperparameters.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Label-space sizes for the QA / object / attribute heads.
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        # Encoder depths: language (l), cross-modality (x) and vision/relation (r) layers.
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        # Visual-feature dimensions and loss scaling.
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        # Pre-training task switches.
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # Composite depth exposed as num_hidden_layers for PretrainedConfig consumers.
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 202
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical config URLs for released Data2Vec-Vision checkpoints.
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    """Configuration for Data2Vec-Vision models (backbone + optional segmentation heads).

    All arguments are persisted as instance attributes so that
    `save_pretrained`/`from_pretrained` round-trips them.
    """

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Transformer encoder hyperparameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Patch-embedding / input geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Position-embedding / masking variants.
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    """ONNX export configuration for Data2Vec-Vision (single pixel_values input)."""

    # Minimum torch version whose ONNX exporter supports this architecture.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input names with their dynamic-axis labels, as the exporter expects."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model's outputs."""
        return 1e-4
| 202
| 1
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class Test(unittest.TestCase):
    """Unit tests for the 0/1 knapsack solver `knapsack.knapsack(cap, w, val, c)`."""

    def test_base_case(self):
        """Zero capacity (or a single zero-value item) yields a total value of 0."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        # Even a valuable item cannot be taken with zero capacity.
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """Small instance: taking weights 2 and 1 (values 2 + 3) is optimal."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """Classic textbook instance with optimum 220 (items 2 and 3)."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| 716
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    """Builds tiny ConvNextV2 configs/inputs and runs shape checks for the test class below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when use_labels is off."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-level tests for ConvNextV2 (model, classification head, backbone)."""

    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    # ConvNextV2 is a pure conv backbone: no pruning/embeddings/head-masking/attentions.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common property checks are not applicable to this config.
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # Bare models / backbones produce no loss; skip them.
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    """End-to-end inference test against the released convnextv2-tiny checkpoint."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 239
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    """Tests for BlipProcessor: save/load round-trips and tokenizer/image-processor delegation."""

    def setUp(self):
        # Fresh temp dir per test; a pretrained processor is saved there for reloading.
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels moved to the last axis)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 608
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["GLPNFeatureExtractor"]
__lowerCamelCase = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 608
| 1
|
"""Lazy-import module for RemBERT: heavy submodules load only on first attribute access."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Maps submodule name -> public symbols; extended below based on available backends.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so importing it stays cheap.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 453
|
# Package initializer: re-exports the public accelerate API.
_UpperCAmelCase : Union[str, Any] = "0.21.0"  # NOTE(review): presumably the package `__version__` — name was mangled; confirm
from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)
# rich-based logging helpers are only exported when `rich` is installed
if is_rich_available():
    from .utils import rich
| 453
| 1
|
import enum
import os
from hashlib import shaaaa  # NOTE(review): `shaaaa` does not exist in hashlib — looks like a garbled `sha256`; fix the import
from typing import Optional
from .. import config
from .logging import get_logger
# NOTE(review): functions below reference `logger`, but the module binds the
# mangled name `__SCREAMING_SNAKE_CASE` — these must be reconciled.
__SCREAMING_SNAKE_CASE = get_logger(__name__)
class lowerCAmelCase_ ( enum.Enum ):
    '''Verification mode for dataset download checks.'''
    # NOTE(review): all three members share the mangled name `_lowercase`;
    # enum.Enum forbids reusing a member name, so this body raises TypeError at
    # import time. Values suggest the originals were ALL_CHECKS / BASIC_CHECKS
    # / NO_CHECKS — confirm against upstream before restoring.
    _lowercase = '''all_checks'''
    _lowercase = '''basic_checks'''
    _lowercase = '''no_checks'''
# NOTE(review): four distinct checksum-verification error classes were all
# mangled to the same name `lowerCAmelCase_` (each shadows the previous one)
# and their base class name `SCREAMING_SNAKE_CASE_` is unresolved here. The
# verify function below raises ExpectedMoreDownloadedFiles,
# UnexpectedDownloadedFile and NonMatchingChecksumError — presumably these.
class lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ):
    '''Checksum-verification error (original class name lost to mangling).'''
class lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ):
    '''Checksum-verification error (original class name lost to mangling).'''
class lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ):
    '''Checksum-verification error (original class name lost to mangling).'''
class lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ):
    '''Checksum-verification error (original class name lost to mangling).'''
def SCREAMING_SNAKE_CASE__ ( expected_checksums ,recorded_checksums ,verification_name=None ) -> Any:
    """Compare recorded download checksums against the expected ones.

    Fixes the mangled signature (three parameters all named ``lowerCAmelCase_``
    was a SyntaxError) and restores the locals ``bad_urls`` /
    ``for_verification_name`` that the body reads but never bound.

    Raises a verification error when files are missing, unexpected, or have a
    mismatching checksum; logs success otherwise.
    """
    if expected_checksums is None:
        logger.info('Unable to verify checksums.' )
        return
    # files expected but never downloaded
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    # files downloaded but never expected
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    # URLs whose recorded checksum differs from the expected one
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls ) > 0:
        # NOTE(review): the exception class names were mangled to
        # `lowerCAmelCase_` above; their original names must be restored there
        # for these references to resolve.
        raise NonMatchingChecksumError(
            f"""Checksums didn't match{for_verification_name}:\n"""
            f"""{bad_urls}\n"""
            'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
    logger.info('All the checksums matched successfully' + for_verification_name )
# NOTE(review): four split-verification error classes mangled to one name,
# as above. The verify function below raises ExpectedMoreSplits,
# UnexpectedSplits and NonMatchingSplitsSizesError — presumably these.
class lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ):
    '''Split-verification error (original class name lost to mangling).'''
class lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ):
    '''Split-verification error (original class name lost to mangling).'''
class lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ):
    '''Split-verification error (original class name lost to mangling).'''
class lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ):
    '''Split-verification error (original class name lost to mangling).'''
def SCREAMING_SNAKE_CASE__ ( expected_splits ,recorded_splits ) -> Tuple:
    """Compare recorded split sizes against the expected split infos.

    Fixes the mangled signature (both parameters were named
    ``lowerCAmelCase_``, a SyntaxError) and the final raise, which passed a
    parameter instead of the list of mismatching splits.
    """
    if expected_splits is None:
        logger.info('Unable to verify splits sizes.' )
        return
    # splits expected but not produced
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    # splits produced but never expected
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    # splits present on both sides but with differing example counts
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info('All the splits matched successfully.' )
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Tuple ,lowerCAmelCase_ : Tuple = True ) -> Any:
"""simple docstring"""
if record_checksum:
SCREAMING_SNAKE_CASE_ : Optional[int] =shaaaa()
with open(lowerCAmelCase_ ,'rb' ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) ,B'' ):
m.update(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Any =m.hexdigest()
else:
SCREAMING_SNAKE_CASE_ : Any =None
return {"num_bytes": os.path.getsize(lowerCAmelCase_ ), "checksum": checksum}
def SCREAMING_SNAKE_CASE__ ( dataset_size: int ) -> bool:
    """Return True iff *dataset_size* is truthy and below config.IN_MEMORY_MAX_SIZE.

    Fixes the mangled parameter name: the body reads ``dataset_size`` but the
    parameter was renamed to ``lowerCAmelCase_``, making it a NameError.
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        # either no size information or in-memory limit disabled
        return False
| 220
|
'''Scrape mobile-app-development job listings from indeed.co.in.'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup  # NOTE(review): `bsa` looks like a garbled `bs4` — confirm and fix
# Base search URL; the location string is appended to the `l=` query parameter.
_lowerCAmelCase = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def _SCREAMING_SNAKE_CASE ( location = "mumbai" ):
    """Yield ``(job_title, company_name)`` pairs scraped from indeed.co.in.

    Fixes two unbound names in the mangled body: the parameter is restored to
    ``location`` (which the body reads) and the base URL constant is referenced
    by its actual module name instead of the undefined ``url``.
    """
    soup = BeautifulSoup(requests.get(_lowerCAmelCase + location ).content , """html.parser""" )
    # Each organic job posting sits in its own div with this data attribute.
    for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
        job_title = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
        company_name = job.find("""span""" , {"""class""": """company"""} ).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
    # NOTE(review): this module defines the generator under the mangled name
    # `_SCREAMING_SNAKE_CASE`; the original `fetch_jobs` does not exist here.
    for i, job in enumerate(_SCREAMING_SNAKE_CASE('''Bangalore'''), 1):
        print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 565
| 0
|
'''simple docstring'''
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : int , lowerCAmelCase : Any) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = set_counts
lowercase__ = max(lowercase__)
lowercase__ = len(lowercase__)
lowercase__ = [1] * num_sets
lowercase__ = list(range(lowercase__))
def UpperCAmelCase ( self : str , lowerCAmelCase : Dict , lowerCAmelCase : Any) -> List[str]:
"""simple docstring"""
lowercase__ = self.get_parent(lowercase__)
lowercase__ = self.get_parent(lowercase__)
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowercase__ = 0
lowercase__ = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowercase__ = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowercase__ = 0
lowercase__ = src_parent
lowercase__ = self.set_counts[src_parent]
lowercase__ = max(self.max_set , lowercase__)
return True
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
if self.parents[disj_set] == disj_set:
return disj_set
lowercase__ = self.get_parent(self.parents[disj_set])
return self.parents[disj_set]
| 702
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCAmelCase__( unittest.TestCase , lowerCamelCase ):
    '''Exercise the `text-classification` tool locally and via remote inference.

    NOTE(review): all five methods carry the mangled name ``UpperCAmelCase`` —
    each definition shadows the previous one and none is discovered by
    unittest (no ``test_`` prefix). The original method names were lost; only
    the unbound-name defects inside the bodies are fixed here.
    '''

    def UpperCAmelCase ( self : List[str]) -> Any:
        """Load the tool twice: a local instance and a remote-inference one."""
        self.tool = load_tool('text-classification')
        self.tool.setup()
        self.remote_tool = load_tool('text-classification' , remote=True)

    def UpperCAmelCase ( self : Any) -> Tuple:
        """Positional-argument call against the local tool."""
        result = self.tool('That\'s quite cool' , ['positive', 'negative'])
        self.assertEqual(result , 'positive')

    def UpperCAmelCase ( self : int) -> Optional[int]:
        """Positional-argument call against the remote tool."""
        result = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
        self.assertEqual(result , 'positive')

    def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
        """Keyword-argument call against the local tool."""
        result = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
        self.assertEqual(result , 'positive')

    def UpperCAmelCase ( self : Any) -> Any:
        """Keyword-argument call against the remote tool."""
        result = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
        self.assertEqual(result , 'positive')
| 642
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __A ( SCREAMING_SNAKE_CASE_ ):
    """Output container for a scheduler step (cf. diffusers' KarrasVeOutput).

    NOTE(review): the base class and field names were mangled; the ``42``
    literals stand where tensor type annotations used to be. Without
    annotations these are plain class attributes, not dataclass fields —
    restore the annotated field definitions from upstream before use.
    """
    UpperCAmelCase__ = 42
    UpperCAmelCase__ = 42
    UpperCAmelCase__ = None
class __A ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
    """Karras-style stochastic sampling scheduler (cf. diffusers' KarrasVeScheduler).

    NOTE(review): heavy name mangling throughout — every method is named
    ``lowerCamelCase__`` (so each definition shadows the previous one), the
    signatures repeat the parameter name ``__snake_case`` (a SyntaxError), and
    the ``__magic_name__`` locals look like garbled ``self.<attr>``
    assignments. Left byte-identical; restore names from upstream before use.
    """
    UpperCAmelCase__ = 2
    @register_to_config
    def __init__( self : str , __snake_case : float = 0.02 , __snake_case : float = 1_0_0 , __snake_case : float = 1.007 , __snake_case : float = 8_0 , __snake_case : float = 0.05 , __snake_case : float = 5_0 , ) -> List[Any]:
        # standard deviation of the initial noise distribution
        __magic_name__: str = sigma_max
        # setable values
        __magic_name__: int = None
        __magic_name__: np.IntTensor = None
        __magic_name__: torch.FloatTensor = None # sigma(t_i)
    # presumably scale_model_input: the identity for this scheduler
    def lowerCamelCase__ ( self : Any , __snake_case : torch.FloatTensor , __snake_case : Optional[int] = None ) -> torch.FloatTensor:
        return sample
    # presumably set_timesteps: builds the reversed timestep index and sigma schedule
    def lowerCamelCase__ ( self : Optional[int] , __snake_case : int , __snake_case : Union[str, torch.device] = None ) -> Tuple:
        __magic_name__: Tuple = num_inference_steps
        __magic_name__: List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy()
        __magic_name__: int = torch.from_numpy(__snake_case ).to(__snake_case )
        __magic_name__: Optional[int] = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        __magic_name__: List[str] = torch.tensor(__snake_case , dtype=torch.floataa , device=__snake_case )
    # presumably add_noise_to_input: raises sigma by gamma and adds matched noise
    def lowerCamelCase__ ( self : str , __snake_case : torch.FloatTensor , __snake_case : float , __snake_case : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            __magic_name__: Union[str, Any] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            __magic_name__: Any = 0
        # sample eps ~ N(0, S_noise^2 * I)
        __magic_name__: Union[str, Any] = self.config.s_noise * randn_tensor(sample.shape , generator=__snake_case ).to(sample.device )
        __magic_name__: Tuple = sigma + gamma * sigma
        __magic_name__: Optional[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    # presumably step: first-order (Euler) update
    def lowerCamelCase__ ( self : str , __snake_case : torch.FloatTensor , __snake_case : float , __snake_case : float , __snake_case : torch.FloatTensor , __snake_case : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
        __magic_name__: str = sample_hat + sigma_hat * model_output
        __magic_name__: Union[str, Any] = (sample_hat - pred_original_sample) / sigma_hat
        __magic_name__: List[str] = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=__snake_case , derivative=__snake_case , pred_original_sample=__snake_case )
    # presumably step_correct: second-order (Heun) correction
    def lowerCamelCase__ ( self : Optional[Any] , __snake_case : torch.FloatTensor , __snake_case : float , __snake_case : float , __snake_case : torch.FloatTensor , __snake_case : torch.FloatTensor , __snake_case : torch.FloatTensor , __snake_case : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
        __magic_name__: Any = sample_prev + sigma_prev * model_output
        __magic_name__: Tuple = (sample_prev - pred_original_sample) / sigma_prev
        __magic_name__: List[str] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=__snake_case , derivative=__snake_case , pred_original_sample=__snake_case )
    # presumably add_noise: intentionally unsupported for this scheduler
    def lowerCamelCase__ ( self : List[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] ) -> Optional[int]:
        raise NotImplementedError()
| 96
|
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
    """Return the sum of the decimal digits of *lowercase* (sign is ignored).

    Fixes the mangled body, which assigned to throwaway names and then read
    the unbound ``n`` / ``res``.
    """
    n = abs(lowercase )
    res = 0
    while n > 0:
        res += n % 10  # take the least-significant digit
        n //= 10
    return res
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
    """Recursively sum the decimal digits of *lowercase* (sign is ignored).

    Fixes the mangled recursive call, which referenced the undefined name
    ``sum_of_digits`` instead of this function itself.
    """
    n = abs(lowercase )
    return n if n < 10 else n % 10 + SCREAMING_SNAKE_CASE__(n // 10 )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
    """Sum the decimal digits via string conversion (sign is ignored).

    Fixes the mangled generator, which summed ``int(lowercase)`` (the whole
    number) once per character instead of each digit ``int(c)``.
    """
    return sum(int(c ) for c in str(abs(lowercase ) ) )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
    """Benchmark the three digit-sum implementations with timeit.

    NOTE(review): non-functional as mangled — ``benchmark_a_function`` declares
    two parameters both named ``lowercase`` (a SyntaxError), its body reads the
    unbound ``call`` / ``timing``, and the loop references the original
    function names (sum_of_digits, ...) which no longer exist in this module.
    Left byte-identical.
    """
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(lowercase ,lowercase ) -> None:
        snake_case : str = f"""{func.__name__}({value})"""
        snake_case : Union[str, Any] = timeit(f"""__main__.{call}""" ,setup="""import __main__""" )
        print(f"""{call:56} = {func(lowercase )} -- {timing:.4f} seconds""" )
    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(lowercase ,lowercase )
    print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()  # NOTE(review): `benchmark` is undefined — the function above was mangled to SCREAMING_SNAKE_CASE__
| 587
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase_ = logging.get_logger(__name__)  # NOTE(review): mangled from `logger`; methods below reference `logger`
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    """Kaldi-fbank speech feature extractor with utterance-level CMVN
    (cf. Speech2Text-style feature extractors).

    NOTE(review): heavily mangled — ``__init__`` and ``__call__`` repeat the
    parameter name ``a`` (SyntaxErrors), and the ``lowercase__`` locals look
    like garbled ``self.<attr>`` assignments (e.g. ``self.num_mel_bins``),
    which later code reads via ``self``. Left byte-identical; restore the
    original names before use.
    """
    _UpperCamelCase : Optional[Any] = ['input_features', 'attention_mask']
    def __init__( self : Tuple , a : List[str]=80 , a : List[Any]=16_000 , a : Any=80 , a : str=0.0 , a : List[Any]=True , a : List[str]=True , a : int=True , **a : str , )-> int:
        """simple docstring"""
        super().__init__(feature_size=a , sampling_rate=a , padding_value=a , **a )
        # NOTE(review): the assignments below should presumably target `self`
        lowercase__ = num_mel_bins
        lowercase__ = do_ceptral_normalize
        lowercase__ = normalize_means
        lowercase__ = normalize_vars
        lowercase__ = True
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : np.ndarray , )-> np.ndarray:
        """Extract Kaldi-compliance fbank features from a float waveform."""
        lowercase__ = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
        lowercase__ = torch.from_numpy(a ).unsqueeze(0 )
        lowercase__ = ta_kaldi.fbank(a , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
        return features.numpy()
    @staticmethod
    def SCREAMING_SNAKE_CASE_ ( a : np.ndarray , a : int , a : Optional[bool] = True , a : Optional[bool] = True , a : float = 0.0 , )-> np.ndarray:
        """Utterance-level cepstral mean/variance normalization of one feature matrix."""
        if normalize_means:
            lowercase__ = x[:input_length].mean(axis=0 )
            lowercase__ = np.subtract(a , a )
        if normalize_vars:
            lowercase__ = x[:input_length].std(axis=0 )
            lowercase__ = np.divide(a , a )
        if input_length < x.shape[0]:
            # zero out (padding_value) the padded region past the true length
            lowercase__ = padding_value
        # make sure array is in float32
        lowercase__ = x.astype(np.floataa )
        return x
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : List[np.ndarray] , a : Optional[np.ndarray] = None )-> List[np.ndarray]:
        """Apply CMVN per utterance, using the attention mask for true lengths."""
        lowercase__ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(a , a , self.normalize_means , self.normalize_vars , self.padding_value )
            for x, n in zip(a , a )
        ]
    def __call__( self : Tuple , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : Union[bool, str, PaddingStrategy] = False , a : Optional[int] = None , a : bool = False , a : Optional[int] = None , a : Optional[Union[str, TensorType]] = None , a : Optional[int] = None , a : Optional[bool] = None , **a : List[Any] , )-> BatchFeature:
        """Featurize raw speech: fbank extraction, padding, optional CMVN."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        lowercase__ = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        lowercase__ = is_batched_numpy or (
            isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            lowercase__ = [np.asarray(a , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(a , np.ndarray ):
            lowercase__ = np.asarray(a , dtype=np.floataa )
        elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            lowercase__ = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            lowercase__ = [raw_speech]
        # extract fbank features
        lowercase__ = [self._extract_fbank_features(a ) for waveform in raw_speech]
        # convert into correct format for padding
        lowercase__ = BatchFeature({'input_features': features} )
        lowercase__ = self.pad(
            a , padding=a , max_length=a , truncation=a , pad_to_multiple_of=a , return_attention_mask=a , **a , )
        # make sure list is in array format
        lowercase__ = padded_inputs.get('input_features' )
        if isinstance(input_features[0] , a ):
            lowercase__ = [np.asarray(a , dtype=np.floataa ) for feature in input_features]
        lowercase__ = padded_inputs.get('attention_mask' )
        if attention_mask is not None:
            lowercase__ = [np.asarray(a , dtype=np.intaa ) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            lowercase__ = (
                np.array(a , dtype=np.intaa )
                if self._get_padding_strategies(a , max_length=a ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            lowercase__ = self.normalize(
                padded_inputs['input_features'] , attention_mask=a )
        if return_tensors is not None:
            lowercase__ = padded_inputs.convert_to_tensors(a )
        return padded_inputs
| 705
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase_ = False  # module-level flag retained from the original test module (unused here)
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    """Placeholder test case: no fast (CPU) tests for VersatileDiffusion."""
    pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    """Nightly GPU integration tests for VersatileDiffusionPipeline.

    NOTE(review): mangled — the bare name ``a`` is used throughout where
    device/flag/argument names used to be (e.g. ``pipe.to(a)``,
    ``disable=a``) and is unbound here. Left byte-identical; these tests also
    require a GPU, network access, and pretrained weights.
    """
    def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]:
        """Dual-guided generation must survive a save/load round-trip unchanged."""
        lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowercase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = pipe.dual_guided(
            prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(a )
            lowercase__ = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa )
            pipe.to(a )
            pipe.set_progress_bar_config(disable=a )
        lowercase__ = generator.manual_seed(0 )
        lowercase__ = pipe.dual_guided(
            prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
        """Pin dual-guided, text-to-image and image-variation outputs to known slices."""
        lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowercase__ = 'cyberpunk 2077'
        lowercase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = pipe.dual_guided(
            prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
        lowercase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        lowercase__ = 'A painting of a squirrel eating a burger '
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = pipe.text_to_image(
            prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
        lowercase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        lowercase__ = pipe.image_variation(a , generator=a , output_type='numpy' ).images
        lowercase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 45
| 0
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
# NOTE(review): the name `a_` is reused below for several unrelated module
# constants (mangling); each assignment overwrites the previous value.
a_ = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ = {
    # fairseq:
    '''wmt19-ru-en''': {'''length_penalty''': 1.1},
    '''wmt19-en-ru''': {'''length_penalty''': 1.1_5},
    '''wmt19-en-de''': {'''length_penalty''': 1.0},
    '''wmt19-de-en''': {'''length_penalty''': 1.1},
    # allenai:
    '''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
    '''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
    '''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
    '''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
    '''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
a_ = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    a_ = '''facebook'''  # NOTE(review): overwrites the dict — likely was `org_names[m] = "facebook"`
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    a_ = '''allenai'''  # NOTE(review): same issue — likely `org_names[m] = "allenai"`
def SCREAMING_SNAKE_CASE__ ( d ):
    """Return a copy of vocab dict *d* with fairseq BPE markers rewritten.

    "word@@" (continuation piece) becomes "word"; a plain "word" (end of word)
    becomes "word</w>". The special tokens <s> <pad> </s> <unk> keep their
    exact spelling. Fixes the mangled version, whose parameter shadowed the
    function name and whose body read the unbound names ``da`` / ``d``.
    """
    da = dict((re.sub(R"""@@$""" , """""" , k ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , k ), v) for k, v in d.items() )
    keep_keys = """<s> <pad> </s> <unk>""".split()
    # restore the special tokens, which the blanket `$` -> `</w>` rewrite broke
    for k in keep_keys:
        del da[f'{k}</w>']
        da[k] = d[k]  # restore
    return da
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
    """Convert a fairseq FSMT checkpoint into a transformers dump directory.

    NOTE(review): non-functional as mangled — both parameters share one name
    (a SyntaxError), and the body reads many names (``checkpoint_file``,
    ``args``, ``src_lang`` ...) that the ``snake_case_`` assignments never
    bind. Also requires fairseq at runtime. Left byte-identical; restore the
    original identifier names from upstream before use.
    """
    assert os.path.exists(SCREAMING_SNAKE_CASE__ )
    os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
    print(f'Writing results to {pytorch_dump_folder_path}' )
    # handle various types of models
    snake_case_ : List[str] = basename(SCREAMING_SNAKE_CASE__ )
    snake_case_ : Any = dirname(SCREAMING_SNAKE_CASE__ )
    snake_case_ : Optional[int] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    snake_case_ : Optional[Any] = cls.hub_models()
    snake_case_ : str = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
    snake_case_ : int = """."""
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f'using checkpoint {checkpoint_file}' )
    snake_case_ : Union[str, Any] = hub_utils.from_pretrained(
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , archive_map=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
    snake_case_ : Any = vars(chkpt["""args"""]["""model"""] )
    snake_case_ : Any = args["""source_lang"""]
    snake_case_ : List[Any] = args["""target_lang"""]
    snake_case_ : Optional[Any] = dirname(SCREAMING_SNAKE_CASE__ )
    snake_case_ : Optional[int] = basename(SCREAMING_SNAKE_CASE__ )
    # dicts
    snake_case_ : Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , f'dict.{src_lang}.txt' )
    snake_case_ : str = os.path.join(SCREAMING_SNAKE_CASE__ , f'dict.{tgt_lang}.txt' )
    snake_case_ : Union[str, Any] = Dictionary.load(SCREAMING_SNAKE_CASE__ )
    snake_case_ : str = rewrite_dict_keys(src_dict.indices )
    snake_case_ : List[str] = len(SCREAMING_SNAKE_CASE__ )
    snake_case_ : Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , """vocab-src.json""" )
    print(f'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
    with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    snake_case_ : Optional[Any] = True
    for k in src_vocab.keys():
        if not k.islower():
            snake_case_ : Any = False
            break
    snake_case_ : int = Dictionary.load(SCREAMING_SNAKE_CASE__ )
    snake_case_ : str = rewrite_dict_keys(tgt_dict.indices )
    snake_case_ : List[Any] = len(SCREAMING_SNAKE_CASE__ )
    snake_case_ : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE__ , """vocab-tgt.json""" )
    print(f'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
    with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
    # merges_file (bpecodes)
    snake_case_ : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , VOCAB_FILES_NAMES["""merges_file"""] )
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        snake_case_ : str = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        if os.path.exists(SCREAMING_SNAKE_CASE__ ):
            break
    with open(SCREAMING_SNAKE_CASE__ , encoding="""utf-8""" ) as fin:
        snake_case_ : List[str] = fin.read()
    snake_case_ : int = re.sub(R""" \d+$""" , """""" , SCREAMING_SNAKE_CASE__ , 0 , re.M )  # remove frequency number
    print(f'Generating {merges_file}' )
    with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as fout:
        fout.write(SCREAMING_SNAKE_CASE__ )
    # model config
    snake_case_ : Any = os.path.join(SCREAMING_SNAKE_CASE__ , """config.json""" )
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f'need to extend tokenizer to support bpe={args["bpe"]}'
    assert args["tokenizer"] == "moses", f'need to extend tokenizer to support bpe={args["tokenizer"]}'
    snake_case_ : Tuple = {
        """architectures""": ["""FSMTForConditionalGeneration"""],
        """model_type""": """fsmt""",
        """activation_dropout""": args["""activation_dropout"""],
        """activation_function""": """relu""",
        """attention_dropout""": args["""attention_dropout"""],
        """d_model""": args["""decoder_embed_dim"""],
        """dropout""": args["""dropout"""],
        """init_std""": 0.02,
        """max_position_embeddings""": args["""max_source_positions"""],
        """num_hidden_layers""": args["""encoder_layers"""],
        """src_vocab_size""": src_vocab_size,
        """tgt_vocab_size""": tgt_vocab_size,
        """langs""": [src_lang, tgt_lang],
        """encoder_attention_heads""": args["""encoder_attention_heads"""],
        """encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
        """encoder_layerdrop""": args["""encoder_layerdrop"""],
        """encoder_layers""": args["""encoder_layers"""],
        """decoder_attention_heads""": args["""decoder_attention_heads"""],
        """decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
        """decoder_layerdrop""": args["""decoder_layerdrop"""],
        """decoder_layers""": args["""decoder_layers"""],
        """bos_token_id""": 0,
        """pad_token_id""": 1,
        """eos_token_id""": 2,
        """is_encoder_decoder""": True,
        """scale_embedding""": not args["""no_scale_embedding"""],
        """tie_word_embeddings""": args["""share_all_embeddings"""],
    }
    # good hparam defaults to start with
    snake_case_ : Tuple = 5
    snake_case_ : str = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        snake_case_ : str = best_score_hparams[model_dir]["""length_penalty"""]
    else:
        snake_case_ : int = 1.0
    print(f'Generating {fsmt_model_config_file}' )
    with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
    # tokenizer config
    snake_case_ : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    snake_case_ : Dict = {
        """langs""": [src_lang, tgt_lang],
        """model_max_length""": 1_0_2_4,
        """do_lower_case""": do_lower_case,
    }
    print(f'Generating {fsmt_tokenizer_config_file}' )
    with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ , indent=SCREAMING_SNAKE_CASE__ ) )
    # model
    snake_case_ : Optional[Any] = chkpt["""models"""][0]
    snake_case_ : List[Any] = model.state_dict()
    # rename keys to start with 'model.'
    snake_case_ : Union[str, Any] = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
    # remove unneeded keys
    snake_case_ : List[str] = [
        """model.model""",
        """model.encoder.version""",
        """model.decoder.version""",
        """model.encoder_embed_tokens.weight""",
        """model.decoder_embed_tokens.weight""",
        """model.encoder.embed_positions._float_tensor""",
        """model.decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        model_state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    snake_case_ : Optional[int] = FSMTConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
    snake_case_ : Any = FSMTForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
    # check that it loads ok
    model_new.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
    # save
    snake_case_ : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    print(f'Generating {pytorch_weights_dump_path}' )
    torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    print("""Conversion is done!""" )
    print("""\nLast step is to upload the files to s3""" )
    print(f'cd {data_root}' )
    print(f'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
    # Fixes mangled names: the parser was bound to `a_` while the body reads
    # `parser`, and the parsed namespace was never bound as `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--fsmt_checkpoint_path''',
        default=None,
        type=str,
        required=True,
        help=(
            '''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
            ''' bpecodes, etc.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    # NOTE(review): the conversion function above was mangled to
    # SCREAMING_SNAKE_CASE__; `convert_fsmt_checkpoint_to_pytorch` does not
    # exist in this module.
    SCREAMING_SNAKE_CASE__(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 480
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class __lowercase ( _UpperCAmelCase):
    """Zero-shot text-classification tool: scores `text` against each caller-provided
    label with an NLI model and returns the most likely label.

    NOTE(review): names are obfuscated; the base class is expected to be
    `PipelineTool`, which looks up the attributes and the `setup`/`encode`/`decode`
    methods restored below (the originals were all collapsed to duplicate names,
    so only the last definition of each survived).
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Resolve which output column of the NLI head means "entailment"."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """Build one (text, "This example is <label>") NLI pair per candidate label."""
        # Remember the labels so decode() can map an argmax index back to a label.
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length", )

    def decode(self, outputs):
        """Return the label whose pair received the highest entailment logit."""
        logits = outputs.logits
        # Use the entailment column resolved in setup() rather than a hard-coded index.
        label_id = torch.argmax(logits[:, self.entailment_id]).item()
        return self._labels[label_id]
| 480
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_snake_case = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659
| 0
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __magic_name__ ( unittest.TestCase ):
    """Tests for `is_safetensors_compatible`: a repo is compatible when every
    `.bin` weight file has a matching `.safetensors` counterpart, optionally for
    a specific `variant` (e.g. fp16).

    Fixes: the filename lists were assigned to `_lowerCAmelCase` but the calls
    referenced other (undefined) names, and all twelve methods shared one name,
    so only the last survived and none was discovered by unittest. The method
    names below restore the intended `test_*` identifiers.
    """

    def test_all_is_compatible(self):
        # Every component ships both .bin and .safetensors weights -> compatible.
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        # The unet .bin file has no safetensors counterpart -> incompatible.
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        # The text encoder .bin has no safetensors counterpart -> incompatible.
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # Non-variant weights can still satisfy a variant request.
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 589
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a__ : Any = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
UpperCamelCase : str
UpperCamelCase : List[str]
UpperCamelCase : Optional[List[str]]
@dataclass
class __magic_name__ :
UpperCamelCase : List[int]
UpperCamelCase : List[int]
UpperCamelCase : Optional[List[int]] = None
UpperCamelCase : Optional[List[int]] = None
class __magic_name__ ( _UpperCamelCase ):
    """Dataset split selector; the obfuscated base is expected to be `enum.Enum`.

    Fix: all three members were bound to the same name `UpperCamelCase`, so only
    one survived; callers reference `Split.train` and `mode.value`.
    """

    train = "train"
    dev = "dev"
    test = "test"
class __magic_name__ :
    # NOTE(review): base task class for token classification (NER-style).
    # All three static methods below were obfuscated to the same name
    # `_lowerCamelCase`, so only the last definition survives on the class;
    # the intended names appear to be read_examples_from_file / get_labels /
    # convert_examples_to_features — confirm against the original file.
    @staticmethod
    def _lowerCamelCase ( __magic_name__ , __magic_name__ ):
        """Read examples from a data file; to be implemented by subclasses."""
        raise NotImplementedError
    @staticmethod
    def _lowerCamelCase ( __magic_name__ ):
        """Return the list of labels; to be implemented by subclasses."""
        raise NotImplementedError
    @staticmethod
    def _lowerCamelCase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False , __magic_name__="[CLS]" , __magic_name__=1 , __magic_name__="[SEP]" , __magic_name__=False , __magic_name__=False , __magic_name__=0 , __magic_name__=0 , __magic_name__=-1_0_0 , __magic_name__=0 , __magic_name__=True , ):
        """Convert examples into model-ready features: tokenize each word,
        align one label id to the first sub-token (pad ids elsewhere), add
        special tokens, then pad/truncate to `max_seq_length`.

        NOTE(review): every local assignment was obfuscated to `_lowerCAmelCase`
        while later lines read `label_map`, `features`, `tokens`, `label_ids`,
        `segment_ids`, `input_ids`, `input_mask`, `padding_length`, etc. — as
        written this raises NameError; restore the original local names.
        """
        _lowerCAmelCase = {label: i for i, label in enumerate(__magic_name__ )}
        _lowerCAmelCase = []
        for ex_index, example in enumerate(__magic_name__ ):
            if ex_index % 1_0_0_0_0 == 0:
                logger.info('Writing example %d of %d' , __magic_name__ , len(__magic_name__ ) )
            _lowerCAmelCase = []
            _lowerCAmelCase = []
            for word, label in zip(example.words , example.labels ):
                _lowerCAmelCase = tokenizer.tokenize(__magic_name__ )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(__magic_name__ ) > 0:
                    tokens.extend(__magic_name__ )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__magic_name__ ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            _lowerCAmelCase = tokenizer.num_special_tokens_to_add()
            if len(__magic_name__ ) > max_seq_length - special_tokens_count:
                _lowerCAmelCase = tokens[: (max_seq_length - special_tokens_count)]
                _lowerCAmelCase = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            _lowerCAmelCase = [sequence_a_segment_id] * len(__magic_name__ )
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                _lowerCAmelCase = [cls_token] + tokens
                _lowerCAmelCase = [pad_token_label_id] + label_ids
                _lowerCAmelCase = [cls_token_segment_id] + segment_ids
            _lowerCAmelCase = tokenizer.convert_tokens_to_ids(__magic_name__ )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            _lowerCAmelCase = [1 if mask_padding_with_zero else 0] * len(__magic_name__ )
            # Zero-pad up to the sequence length.
            _lowerCAmelCase = max_seq_length - len(__magic_name__ )
            if pad_on_left:
                _lowerCAmelCase = ([pad_token] * padding_length) + input_ids
                _lowerCAmelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                _lowerCAmelCase = ([pad_token_segment_id] * padding_length) + segment_ids
                _lowerCAmelCase = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(__magic_name__ ) == max_seq_length
            assert len(__magic_name__ ) == max_seq_length
            assert len(__magic_name__ ) == max_seq_length
            assert len(__magic_name__ ) == max_seq_length
            if ex_index < 5:
                logger.info('*** Example ***' )
                logger.info('guid: %s' , example.guid )
                logger.info('tokens: %s' , ' '.join([str(__magic_name__ ) for x in tokens] ) )
                logger.info('input_ids: %s' , ' '.join([str(__magic_name__ ) for x in input_ids] ) )
                logger.info('input_mask: %s' , ' '.join([str(__magic_name__ ) for x in input_mask] ) )
                logger.info('segment_ids: %s' , ' '.join([str(__magic_name__ ) for x in segment_ids] ) )
                logger.info('label_ids: %s' , ' '.join([str(__magic_name__ ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                _lowerCAmelCase = None
            features.append(
                InputFeatures(
                    input_ids=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , label_ids=__magic_name__ ) )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class __magic_name__ ( _UpperCamelCase ):
    # NOTE(review): PyTorch `Dataset` of `InputFeatures`, built once and cached
    # on disk. Local assignments were obfuscated to `_lowerCAmelCase`, while the
    # code reads `cached_features_file`, `self.features`, etc. — restore the
    # original names before use.
    # Cached features list for __len__/__getitem__.
    UpperCamelCase : List[InputFeatures]
    # Label id ignored by the loss (CrossEntropyLoss().ignore_index == -100).
    UpperCamelCase : int = nn.CrossEntropyLoss().ignore_index
    def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = Split.train , ):
        """Load features from the cache file if present, otherwise read the raw
        examples, convert them to features and save the cache."""
        # Cache path encodes split, tokenizer class and max sequence length.
        _lowerCAmelCase = os.path.join(
            __magic_name__ , 'cached_{}_{}_{}'.format(mode.value , tokenizer.__class__.__name__ , str(__magic_name__ ) ) , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        _lowerCAmelCase = cached_features_file + '.lock'
        with FileLock(__magic_name__ ):
            if os.path.exists(__magic_name__ ) and not overwrite_cache:
                logger.info(F'''Loading features from cached file {cached_features_file}''' )
                _lowerCAmelCase = torch.load(__magic_name__ )
            else:
                logger.info(F'''Creating features from dataset file at {data_dir}''' )
                _lowerCAmelCase = token_classification_task.read_examples_from_file(__magic_name__ , __magic_name__ )
                # TODO clean up all this to leverage built-in features of tokenizers
                _lowerCAmelCase = token_classification_task.convert_examples_to_features(
                    __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info(F'''Saving features into cached file {cached_features_file}''' )
                torch.save(self.features , __magic_name__ )
    def __len__( self ):
        """Number of cached feature records."""
        return len(self.features )
    def __getitem__( self , __magic_name__ ):
        """Return the i-th `InputFeatures` record."""
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class __magic_name__ :
    # NOTE(review): TensorFlow counterpart of the dataset above — wraps the
    # converted features in a `tf.data.Dataset` via a generator. Local
    # assignments were obfuscated to `_lowerCAmelCase` while later code reads
    # `self.features` and `self.dataset`; restore the original names.
    # Converted features backing the generator.
    UpperCamelCase : List[InputFeatures]
    # Label id ignored by the loss (same value as CrossEntropyLoss().ignore_index).
    UpperCamelCase : int = -100
    def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = Split.train , ):
        """Read the examples, convert them to features and build a generator-backed
        tf.data.Dataset (with or without token_type_ids depending on the tokenizer)."""
        _lowerCAmelCase = token_classification_task.read_examples_from_file(__magic_name__ , __magic_name__ )
        # TODO clean up all this to leverage built-in features of tokenizers
        _lowerCAmelCase = token_classification_task.convert_examples_to_features(
            __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
        def gen():
            # Yield (inputs, labels) pairs in the layout tf.data expects.
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )
        if "token_type_ids" not in tokenizer.model_input_names:
            _lowerCAmelCase = tf.data.Dataset.from_generator(
                __magic_name__ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) , (
                    {'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
                    tf.TensorShape([None] ),
                ) , )
        else:
            _lowerCAmelCase = tf.data.Dataset.from_generator(
                __magic_name__ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) , (
                    {
                        'input_ids': tf.TensorShape([None] ),
                        'attention_mask': tf.TensorShape([None] ),
                        'token_type_ids': tf.TensorShape([None] ),
                    },
                    tf.TensorShape([None] ),
                ) , )
    def _lowerCamelCase ( self ):
        """Attach the known cardinality and return the tf.data.Dataset."""
        _lowerCAmelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
        return self.dataset
    def __len__( self ):
        """Number of converted feature records."""
        return len(self.features )
    def __getitem__( self , __magic_name__ ):
        """Return the i-th `InputFeatures` record."""
        return self.features[i]
| 589
| 1
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __SCREAMING_SNAKE_CASE :
    """Builds a tiny ViT config plus random pixel inputs and checks output shapes
    for ViTModel / ViTForMaskedImageModeling / ViTForImageClassification.

    NOTE(review): all helper methods below were obfuscated to the same name
    `lowercase_`, so only the last definition survives on the class; the intended
    names appear to be prepare_config_and_inputs / get_config / create_and_check_*
    / prepare_config_and_inputs_for_common — confirm against the original test.
    """
    def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=None , _UpperCAmelCase=2 , ):
        __snake_case : List[Any] = parent
        __snake_case : List[Any] = batch_size
        __snake_case : Dict = image_size
        __snake_case : int = patch_size
        __snake_case : Optional[Any] = num_channels
        __snake_case : Union[str, Any] = is_training
        __snake_case : int = use_labels
        __snake_case : Any = hidden_size
        __snake_case : Union[str, Any] = num_hidden_layers
        __snake_case : Dict = num_attention_heads
        __snake_case : Dict = intermediate_size
        __snake_case : Dict = hidden_act
        __snake_case : List[str] = hidden_dropout_prob
        __snake_case : List[Any] = attention_probs_dropout_prob
        __snake_case : List[str] = type_sequence_label_size
        __snake_case : Optional[int] = initializer_range
        __snake_case : Optional[Any] = scope
        __snake_case : List[str] = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        __snake_case : int = (image_size // patch_size) ** 2
        __snake_case : Union[str, Any] = num_patches + 1
    def lowercase_ ( self ):
        # Random pixel batch plus optional classification labels.
        __snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __snake_case : str = None
        if self.use_labels:
            __snake_case : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __snake_case : Tuple = self.get_config()
        return config, pixel_values, labels
    def lowercase_ ( self ):
        # Tiny ViT config mirroring the ctor arguments above.
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        # Base model: assert last_hidden_state shape.
        __snake_case : int = ViTModel(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __snake_case : Union[str, Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        # Masked-image-modeling head: reconstruction has image shape.
        __snake_case : Tuple = ViTForMaskedImageModeling(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __snake_case : Optional[int] = model(_UpperCAmelCase )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        __snake_case : str = 1
        __snake_case : List[Any] = ViTForMaskedImageModeling(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __snake_case : List[Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        # Classification head: logits per class.
        __snake_case : Optional[int] = self.type_sequence_label_size
        __snake_case : Optional[int] = ViTForImageClassification(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __snake_case : int = model(_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __snake_case : List[Any] = 1
        __snake_case : Optional[Any] = ViTForImageClassification(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __snake_case : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __snake_case : int = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowercase_ ( self ):
        # Repackage config and inputs as the common-test dict format.
        __snake_case : List[Any] = self.prepare_config_and_inputs()
        (
            (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) ,
        ) : str = config_and_inputs
        __snake_case : Union[str, Any] = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
    """Standard model-tester suite for ViT: config sanity, embedding types,
    forward-signature argument names, and the three task heads.

    NOTE(review): the class attributes were all obfuscated to `__UpperCAmelCase`
    (only the last assignment survives) and the mixin bases to `UpperCamelCase`
    (expected: ModelTesterMixin, PipelineTesterMixin); likewise most test methods
    share the name `lowercase_`. Restore the original identifiers before relying
    on this suite.
    """
    __UpperCAmelCase = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    __UpperCAmelCase = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    __UpperCAmelCase = True
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    def lowercase_ ( self ):
        # setUp: build the model tester and a config tester (hidden_size matches the tiny config).
        __snake_case : Union[str, Any] = ViTModelTester(self )
        __snake_case : Tuple = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
    def lowercase_ ( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def lowercase_ ( self ):
        pass
    def lowercase_ ( self ):
        # Input embeddings are a module; output embeddings are absent or a Linear head.
        __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : Dict = model_class(_UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __snake_case : List[str] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
    def lowercase_ ( self ):
        # Forward signature must start with `pixel_values`.
        __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : List[str] = model_class(_UpperCAmelCase )
            __snake_case : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case : List[str] = [*signature.parameters.keys()]
            __snake_case : Optional[int] = ['pixel_values']
            self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
    def lowercase_ ( self ):
        __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )
    def lowercase_ ( self ):
        __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
    def lowercase_ ( self ):
        __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
    @slow
    def lowercase_ ( self ):
        # Smoke-test loading the first pretrained checkpoint from the hub archive list.
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case : Dict = ViTModel.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase__( ):
    """Load the COCO test fixture image used by the ViT integration tests.

    Fix: the opened image was assigned to `__snake_case` while the function
    returned the undefined name `image`.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    """Slow integration tests that run real pretrained ViT checkpoints on the
    COCO fixture image and compare outputs against recorded values.

    NOTE(review): every method below was obfuscated to the same name `lowercase_`
    (only the last survives), and the locals to `__snake_case` while later lines
    read `model`, `image_processor`, `outputs`, `inputs`, etc. — restore the
    original identifiers before relying on these tests.
    """
    @cached_property
    def lowercase_ ( self ):
        # Image processor for the 224x224 classification checkpoint; None when
        # vision extras are unavailable.
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
    @slow
    def lowercase_ ( self ):
        # ImageNet classification head: check logits shape and first values.
        __snake_case : List[str] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(_UpperCAmelCase )
        __snake_case : int = self.default_image_processor
        __snake_case : str = prepare_img()
        __snake_case : int = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            __snake_case : Optional[Any] = model(**_UpperCAmelCase )
        # verify the logits
        __snake_case : Dict = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
        __snake_case : Tuple = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(_UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
    @slow
    def lowercase_ ( self ):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        __snake_case : Any = ViTModel.from_pretrained('facebook/dino-vits8' ).to(_UpperCAmelCase )
        __snake_case : Optional[Any] = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
        __snake_case : Optional[int] = prepare_img()
        __snake_case : Union[str, Any] = image_processor(images=_UpperCAmelCase , return_tensors='pt' )
        __snake_case : Tuple = inputs.pixel_values.to(_UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            __snake_case : str = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase )
        # verify the logits
        __snake_case : Optional[Any] = torch.Size((1, 3_601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape , _UpperCAmelCase )
        __snake_case : Union[str, Any] = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(_UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
    @slow
    @require_accelerate
    @require_torch_gpu
    def lowercase_ ( self ):
        # fp16 + accelerate device_map: just check the forward pass runs.
        __snake_case : Any = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' )
        __snake_case : List[str] = self.default_image_processor
        __snake_case : int = prepare_img()
        __snake_case : Optional[Any] = image_processor(images=_UpperCAmelCase , return_tensors='pt' )
        __snake_case : int = inputs.pixel_values.to(_UpperCAmelCase )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            __snake_case : Union[str, Any] = model(_UpperCAmelCase )
| 679
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
| 679
| 1
|
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
# Fix: all four constants were assigned to `a__`, while the functions below read
# VALID_CHARS / LOWERCASE_INTS / VALID_INTS / COMMON_WORDS (and the set
# comprehension itself referenced the then-undefined VALID_CHARS).
# Characters considered "valid English plaintext" for a decoded byte.
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
# Candidate key bytes: lowercase ASCII letters (per the Project Euler 59 statement).
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
# Same valid characters, as code points, for O(1) membership tests.
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
# Frequent English words used to whittle down candidate decodings.
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """Decode `ciphertext` by XOR-ing it with the repeating `key`.

    Returns the decoded string, or None as soon as any decoded byte falls
    outside VALID_INTS (i.e. is not plausible English plaintext).

    Fixes: both parameters shared one name (a SyntaxError), dead `= 42`
    assignments shadowed the accumulator, and `chr()` was applied to the wrong
    variable. The name `try_key` matches the existing call site. Also builds the
    result with str.join instead of quadratic string concatenation.
    """
    decoded_chars: list[str] = []
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded_chars.append(chr(decodedchar))
    return "".join(decoded_chars)
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Return every decoding of `ciphertext` under a 3-letter lowercase XOR key
    whose characters are all valid.

    Fixes: the decoded value was assigned to an obfuscated name while the `if`
    read the undefined `encoded`, the result list was never named `possibles`,
    and the ciphertext (not the decoding) was appended. The name
    `filter_valid_chars` matches the existing call site.
    """
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only candidate decodings containing `common_word` (candidates are
    lower-cased for the comparison; the word itself is used as given).

    Fix: renamed from the duplicated `__snake_case` to match the existing call
    site `filter_common_word(...)`.
    """
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    """Project Euler 59: decrypt the XOR-encrypted file and return the sum
    of the ASCII values of the decrypted plaintext.

    The data file is expected to live next to this script.
    """
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding='''utf-8''' )
    ciphertext = [int(number) for number in data.strip().split(''',''' )]

    possibles = filter_valid_chars(ciphertext)
    # Narrow the candidates with successively applied common English words
    # until exactly one decoding remains.
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
# Script entry point: decode the cipher file and print the resulting checksum.
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 51
|
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( SchedulerCommonTest ):
    """Unit tests for `EulerDiscreteScheduler` (epsilon / v-prediction,
    device placement, and Karras sigmas)."""

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        # Default scheduler config; individual tests override keys via kwargs.
        config = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Regression values for the deterministic full denoising loop.
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 51
| 1
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCAmelCase_ ( TokenizerTesterMixin, unittest.TestCase ):
    """Tokenization tests for MobileBERT, covering the slow (Python) and
    fast (Rust) tokenizers plus the underlying Basic/Wordpiece tokenizers."""

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = 'google/mobilebert-uncased'

    def setUp(self):
        super().setUp()

        # Minimal vocabulary sufficient for the tokenization cases below.
        vocab_tokens = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = """UNwant\u00E9d,running"""
        output_text = """unwanted, running"""
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""")
        self.assertListEqual(tokens, ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = """UNwant\u00E9d,running"""

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = """UNwant\u00E9d,running"""

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz"""), ["""ah""", """\u535A""", """\u63A8""", """zz"""])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """), ["""hello""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo"""), ["""hello"""])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""hällo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo"""), ["""h\u00E9llo"""])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo"""), ["""hello"""])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        # Lower-casing strips accents by default.
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo"""), ["""hello"""])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """), ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["""[UNK]"""])
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]"""), ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="""[UNK]""")

        self.assertListEqual(tokenizer.tokenize(""""""), [])
        self.assertListEqual(tokenizer.tokenize("""unwanted running"""), ["""un""", """##want""", """##ed""", """runn""", """##ing"""])
        self.assertListEqual(tokenizer.tokenize("""unwantedX running"""), ["""[UNK]""", """runn""", """##ing"""])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(""" """))
        self.assertTrue(_is_whitespace("""\t"""))
        self.assertTrue(_is_whitespace("""\r"""))
        self.assertTrue(_is_whitespace("""\n"""))
        self.assertTrue(_is_whitespace("""\u00A0"""))

        self.assertFalse(_is_whitespace("""A"""))
        self.assertFalse(_is_whitespace("""-"""))

    def test_is_control(self):
        self.assertTrue(_is_control("""\u0005"""))

        self.assertFalse(_is_control("""A"""))
        self.assertFalse(_is_control(""" """))
        self.assertFalse(_is_control("""\t"""))
        self.assertFalse(_is_control("""\r"""))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("""-"""))
        self.assertTrue(_is_punctuation("""$"""))
        self.assertTrue(_is_punctuation("""`"""))
        self.assertTrue(_is_punctuation("""."""))

        self.assertFalse(_is_punctuation("""A"""))
        self.assertFalse(_is_punctuation(""" """))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]], [["""[UNK]"""], [], ["""[UNK]"""]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]], [["""[UNK]"""], [], ["""[UNK]"""]])

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""")

        text = tokenizer.encode("""sequence builders""", add_special_tokens=False)
        text_a = tokenizer.encode("""multi-sequence build""", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        # [CLS]=101 and [SEP]=102 for the pretrained vocabulary.
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, """do_lower_case""") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """A"""),
                        ((1, 2), ""","""),
                        ((3, 5), """na"""),
                        ((5, 6), """##ï"""),
                        ((6, 8), """##ve"""),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), """Allen"""),
                        ((21, 23), """##NL"""),
                        ((23, 24), """##P"""),
                        ((25, 33), """sentence"""),
                        ((33, 34), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """a"""),
                        ((1, 2), ""","""),
                        ((3, 8), """naive"""),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), """allen"""),
                        ((21, 23), """##nl"""),
                        ((23, 24), """##p"""),
                        ((25, 33), """sentence"""),
                        ((33, 34), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
                self.assertEqual([e[0] for e in expected_results], tokens["""offset_mapping"""])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["""的""", """人""", """有"""]
        text_with_chinese_char = """""".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 412
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
    """Tests for ChineseCLIPProcessor: that it correctly wraps and
    round-trips its tokenizer and image processor."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # Minimal BERT-style vocabulary for the Chinese sample sentence below.
        vocab_tokens = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """的""",
            """价""",
            """格""",
            """是""",
            """15""",
            """便""",
            """alex""",
            """##andra""",
            """,""",
            """。""",
            """-""",
            """t""",
            """shirt""",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))

        image_processor_map = {
            """do_resize""": True,
            """size""": {"""height""": 224, """width""": 224},
            """do_center_crop""": True,
            """crop_size""": {"""height""": 18, """width""": 18},
            """do_normalize""": True,
            """image_mean""": [0.48145466, 0.4578275, 0.40821073],
            """image_std""": [0.26862954, 0.26130258, 0.27577711],
            """do_convert_rgb""": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, """w""", encoding="""utf-8""") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # A single random 30x400 RGB image, converted to a PIL Image.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="""(CLS)""", sep_token="""(SEP)""")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="""(CLS)""", sep_token="""(SEP)""", do_normalize=False)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="""np""")
        input_processor = processor(images=image_input, return_tensors="""np""")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = """Alexandra,T-shirt的价格是15便士。"""

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = """Alexandra,T-shirt的价格是15便士。"""
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = """Alexandra,T-shirt的价格是15便士。"""
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 412
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps submodule name -> list of public symbols.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 323
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

# Hold out 20% of the data; the split is not used below but is kept so the
# script mirrors the usual train/test workflow.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Fit a degree-4 polynomial regression on the full dataset.
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polymonial() -> None:
    """Plot the raw data points and the fitted polynomial regression curve."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
# output should be 132148.43750003
| 323
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of released BigBird checkpoints to their config files.
BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Configuration for BigBird models.

    Defaults reproduce the google/bigbird-roberta-base architecture.
    """

    model_type = 'big_bird'

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are consumed by the base class.
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class SCREAMING_SNAKE_CASE ( OnnxConfig ):
    """ONNX export configuration for BigBird."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 700
|
def solution(numerator: int = 3, denominator: int = 7, limit: int = 100_0000) -> int:
    """Project Euler 71: numerator of the fraction immediately to the left
    of numerator/denominator among fractions with denominator <= limit.

    For each denominator d, the best candidate numerator is
    floor(d * numerator / denominator), decremented when it would equal
    the target fraction exactly; the overall maximum is kept via
    cross-multiplication to avoid float comparisons.
    """
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            # Exact multiple would yield the target fraction itself; step left.
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
# Script entry point: solve Project Euler 71 with the canonical parameters.
if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=100_0000))
| 547
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy import structure: maps submodule name -> list of public symbols.
_import_structure = {
    """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
    """processing_trocr""": ["""TrOCRProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrOCRForCausalLM""",
        """TrOCRPreTrainedModel""",
    ]


if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # At runtime, replace this module with a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 246
|
import requests
from bs4 import BeautifulSoup  # fixed: the package is `bs4`, not `bsa`


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation-count text for a Google Scholar lookup.

    :param base_url: Google Scholar endpoint to query
    :param params: query parameters identifying the publication
    :return: text of the "Cited by N" anchor
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    # The third anchor of the result footer is the "Cited by N" link.
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2_018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 387
| 0
|
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime (deterministic trial division)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers in increasing order, starting at 2."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Project Euler 7: return the ``nth`` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
| 203
|
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift ``number`` left by ``shift_amount`` bits (append zeros).

    >>> logical_left_shift(1, 1)
    '0b10'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift ``number`` right by ``shift_amount`` bits, filling with zeros.

    >>> logical_right_shift(8, 2)
    '0b10'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        # Everything shifted out: the result is zero.
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift ``number`` right by ``shift_amount`` bits, replicating the sign bit.

    >>> arithmetic_right_shift(-2, 1)
    '0b111'
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        # All original bits shifted out: result is all sign bits.
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 203
| 1
|
def solution(n: int = 1000) -> int:
    """Return the sum of 2*a*floor((a-1)/2) for a in [3, n].

    :param n: inclusive upper bound of the summation range
    :return: the computed sum
    """
    # Generator expression keeps this O(n) with constant memory.
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
| 61
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49
| 0
|
"""simple docstring"""
__UpperCamelCase = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> int:
SCREAMING_SNAKE_CASE = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
SCREAMING_SNAKE_CASE = Stack()
SCREAMING_SNAKE_CASE = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__A ) )
elif i in operators:
# RULE 2
operator_stack.push(__A )
elif i == ")":
# RULE 4
SCREAMING_SNAKE_CASE = operator_stack.peek()
operator_stack.pop()
SCREAMING_SNAKE_CASE = operand_stack.peek()
operand_stack.pop()
SCREAMING_SNAKE_CASE = operand_stack.peek()
operand_stack.pop()
SCREAMING_SNAKE_CASE = operators[opr](__A , __A )
operand_stack.push(__A )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
__UpperCamelCase = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 708
|
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def lowercase (SCREAMING_SNAKE_CASE_ : int = 3 ) -> qiskit.result.counts.Counts:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError('number of qubits must be a integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(SCREAMING_SNAKE_CASE_ ) != number_of_qubits:
raise ValueError('number of qubits must be exact integer.' )
if number_of_qubits > 10:
raise ValueError('number of qubits too large to simulate(>10).' )
SCREAMING_SNAKE_CASE = QuantumRegister(SCREAMING_SNAKE_CASE_ , 'qr' )
SCREAMING_SNAKE_CASE = ClassicalRegister(SCREAMING_SNAKE_CASE_ , 'cr' )
SCREAMING_SNAKE_CASE = QuantumCircuit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = number_of_qubits
for i in range(SCREAMING_SNAKE_CASE_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(SCREAMING_SNAKE_CASE_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(SCREAMING_SNAKE_CASE_ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# simulate with 10000 shots
SCREAMING_SNAKE_CASE = Aer.get_backend('qasm_simulator' )
SCREAMING_SNAKE_CASE = execute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , shots=1_00_00 )
return job.result().get_counts(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 327
| 0
|
from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Lazy import table: submodule name -> public names it provides.  The name
# must be `_import_structure` because it is passed to `_LazyModule` below.
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy; attributes are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 295
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    """Tool that summarizes an English text with a BART summarization checkpoint."""

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        """Tokenize the input text, truncating to the model's maximum length."""
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        """Generate the summary token ids from the encoded inputs."""
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        """Convert the generated token ids back into clean text."""
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 295
| 1
|
"""Feature extractor for Speech2Text: Kaldi-compliant fbank features + CMVN."""
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """Extracts log-mel filter-bank features from raw speech and applies
    utterance-level cepstral mean and variance normalization (CMVN)."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        # CMVN needs the true (unpadded) lengths, hence always return the mask.
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """Compute Kaldi-style log-mel filter-bank features for one waveform."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        """Normalize one utterance using statistics of its first ``input_length`` frames."""
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            # Re-apply the padding value to frames beyond the real length.
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        """Apply utterance-level CMVN to each feature matrix in the batch."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize and pad one utterance or a batch of utterances."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 43
|
"""MobileViT sub-package: lazily exposes configs, processors and models."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Lazy import table: submodule name -> public names it provides.  The name
# must be `_import_structure` because it is passed to `_LazyModule` below.
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; attributes are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 43
| 1
|
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase = datasets.utils.logging.get_logger(__name__)
class lowercase ( folder_based_builder.FolderBasedBuilderConfig ):
lowercase = None
lowercase = None
class lowercase ( folder_based_builder.FolderBasedBuilder ):
lowercase = datasets.Audio()
lowercase = '''audio'''
lowercase = AudioFolderConfig
lowercase = 42 # definition at the bottom of the script
lowercase = AudioClassification(audio_column='''audio''' ,label_column='''label''' )
UpperCAmelCase = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
UpperCAmelCase = AUDIO_EXTENSIONS
| 535
|
"""Minimum-cost train tickets (dynamic programming over the days of a year)."""
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Return the minimum total cost to travel on every day in ``days``.

    ``costs`` holds the prices of a 1-day, 7-day and 30-day pass, in that order.

    :param days: travel days, each in [1, 365]
    :param costs: exactly three integer pass prices
    :raises ValueError: if either parameter is malformed
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Cheapest way to cover all travel days from `index` to year end.
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 286
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main() -> None:
    """Entry point of the `accelerate` CLI: parse args and dispatch to a sub-command."""
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        # No sub-command given: show the help text and exit with an error code.
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 721
|
class TrieNode:
    """A node of a trie; ``is_leaf`` marks the end of a stored word."""

    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert every word of ``words`` into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert ``word`` into the trie rooted at this node."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True iff ``word`` was inserted (a mere prefix does not count)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Remove ``word`` from the trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            # Returns True when `curr` can be removed by its parent.
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    """Depth-first print of every word stored under ``node``."""
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    """Exercise insert/find/delete; return True when every check passes."""
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 171
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of reference checkpoints to the location of their hosted config files.
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    """Configuration class for a Wav2Vec2 model.

    The defaults yield a configuration similar to ``facebook/wav2vec2-base-960h``.
    """

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three convolutional hyper-parameter lists describe the same stack
        # of layers, so they must all have the same length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Product of the conv strides = total downsampling factor of the feature encoder.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 696
|
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works

from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


# Emit a deprecation warning at import time pointing callers to the new
# location of these symbols; the re-exports above keep old imports working.
deprecate(
    'pipelines_utils',
    '0.22.0',
    'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
    standard_warn=False,
    stacklevel=3,
)
| 524
| 0
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_lowerCAmelCase = re.compile(r'\s+')
def a__ ( a ) -> Optional[Any]:
return {"hash": hashlib.mda(re.sub(a , '''''' , example['''content'''] ).encode('''utf-8''' ) ).hexdigest()}
def a__ ( a ) -> Dict:
A_ : Dict = [len(a ) for line in example['''content'''].splitlines()]
return {"line_mean": np.mean(a ), "line_max": max(a )}
def a__ ( a ) -> List[str]:
A_ : List[str] = np.mean([c.isalnum() for c in example['''content''']] )
return {"alpha_frac": alpha_frac}
def a__ ( a , a ) -> List[str]:
if example["hash"] in uniques:
uniques.remove(example['''hash'''] )
return True
else:
return False
def a__ ( a , scan_width=5 ) -> str:
    """Scan the first ``scan_width`` lines of the content for auto-generation markers.

    Fix: duplicate parameter names (SyntaxError) and ``zip(range(a), a)``
    which zipped the example dict with itself instead of limiting the scan
    to the first ``scan_width`` lines.
    """
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = a['content'].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def a__ ( a , a=5 , a=0.0_5 ) -> Any:
A_ : Optional[int] = ['''unit tests''', '''test file''', '''configuration file''']
A_ : List[str] = example['''content'''].splitlines()
A_ : List[Any] = 0
A_ : Optional[int] = 0
# first test
for _, line in zip(range(a ) , a ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
A_ : List[str] = example['''content'''].count('''\n''' )
A_ : List[Any] = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('''config''' )
count_test += line.lower().count('''test''' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a__ ( a ) -> Tuple:
A_ : Optional[Any] = ['''def ''', '''class ''', '''for ''', '''while ''']
A_ : Dict = example['''content'''].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a__ ( a , minimum=4 ) -> str:
    """Return whether the content contains at most ``minimum`` '=' characters.

    Fix: duplicate parameter names (SyntaxError) and the undefined name
    ``example``.
    """
    lines = a['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=')
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def a__ ( a ) -> List[Any]:
    """Return the characters-per-token ratio of the content.

    Uses the module-level ``tokenizer``. Fixes: ``truncation=a`` passed the
    example dict as the truncation flag — the content must be tokenized
    untruncated so the ratio covers the whole file — and the body read the
    undefined name ``example``.
    """
    input_ids = tokenizer(a['content'], truncation=False)['input_ids']
    ratio = len(a['content']) / len(input_ids)
    return {"ratio": ratio}
def a__ ( a ) -> Any:
    """Apply all per-example statistics/heuristics and merge their result dicts.

    Fix: the accumulator was bound to a throwaway local while the code read
    the undefined name ``results``.
    NOTE(review): the helper names below (get_hash, line_stats, ...) are the
    intended module-level functions, but in this file their definitions were
    obfuscated to ``a__`` — confirm the name mapping before running.
    """
    results = {}
    results.update(get_hash(a))
    results.update(line_stats(a))
    results.update(alpha_stats(a))
    results.update(char_token_ratio(a))
    results.update(is_autogenerated(a))
    results.update(is_config_or_test(a))
    results.update(has_no_keywords(a))
    results.update(has_few_assignments(a))
    return results
def a__ ( a , uniques , args ) -> List[str]:
    """Return True if the example passes all deduplication/quality filters.

    Fix: the signature duplicated the parameter name three times (a
    SyntaxError) and the body read the undefined name ``example``.
    """
    if not check_uniques(a, uniques):
        return False
    elif a["autogenerated"]:
        return False
    elif a["line_max"] > args.line_max:
        return False
    elif a["line_mean"] > args.line_mean:
        return False
    elif a["alpha_frac"] < args.alpha_frac:
        return False
    elif a["ratio"] < args.min_token_ratio:
        return False
    # config/test and keyword-free files are dropped only probabilistically
    elif a["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif a["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif a["has_few_assignments"]:
        return False
    else:
        return True
def a__ ( a ) -> Tuple:
with open(a , '''rb''' ) as f_in:
with gzip.open(str(a ) + '''.gz''' , '''wb''' , compresslevel=6 ) as f_out:
shutil.copyfileobj(a , a )
os.unlink(a )
# Settings
# Fix: every statement below was bound to the same throwaway name
# ``_lowerCAmelCase`` while subsequent lines read parser/args/tokenizer/ds/...
# The real variable names are restored so the script is runnable.
# NOTE(review): the call names preprocess/filter/compress_file match the
# intended API, but the definitions in this file were obfuscated to ``a__`` —
# confirm the mapping.
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f'Time to load dataset: {time.time()-t_start:.2f}')

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f'Time to preprocess dataset: {time.time()-t_start:.2f}')

# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(f'Fraction of duplicates: {1-frac:.2%}')

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'Time to filter dataset: {time.time()-t_start:.2f}')
print(f'Size of filtered dataset: {len(ds_filter)}')

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f'Time to deduplicate dataset: {time.time()-t_start:.2f}')
    print(f'Size of deduplicate dataset: {len(ds_filter)}')

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f'file-{file_number+1:012}.json')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f'Time to save dataset: {time.time()-t_start:.2f}')
| 236
|
"""CodeParrot dataset preprocessing: per-file quality statistics, exact and
near deduplication, heuristic filtering, and gzip-compressed JSON export.

This region was an exact duplicate of the previous file; it is repaired as a
single consistent unit (the obfuscation had renamed all functions to ``a__``,
all locals to ``A_``, duplicated parameter names, and used ``hashlib.mda``).
"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset

from transformers import AutoTokenizer, HfArgumentParser

PATTERN = re.compile(r'\s+')


def get_hash(example):
    """MD5 hash of the example's content with all whitespace removed."""
    return {"hash": hashlib.md5(re.sub(PATTERN, '', example['content']).encode('utf-8')).hexdigest()}


def line_stats(example):
    """Mean and max line length of the file."""
    line_lengths = [len(line) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Fraction of alphanumeric characters."""
    alpha_frac = np.mean([c.isalnum() for c in example['content']])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """True (and consume the hash) if the example has not been seen yet."""
    if example["hash"] in uniques:
        uniques.remove(example['hash'])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Scan the first ``scan_width`` lines for auto-generation markers."""
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Flag probable config/test files via explicit markers and token density."""
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n')
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count('config')
        count_test += line.lower().count('test')
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """True if the file defines no function/class/loop."""
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """True if the file contains at most ``minimum`` '=' characters."""
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=')
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Characters-per-token ratio of the content (uses the global tokenizer)."""
    input_ids = tokenizer(example['content'], truncation=False)['input_ids']
    ratio = len(example['content']) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Run all heuristics on one example and merge the result dicts."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):  # noqa: A001 -- name is used by ds.filter below
    """Apply all filters; True keeps the example."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Gzip-compress ``file_path`` and remove the original."""
    with open(file_path, 'rb') as f_in:
        with gzip.open(str(file_path) + '.gz', 'wb', compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f'Time to load dataset: {time.time()-t_start:.2f}')

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f'Time to preprocess dataset: {time.time()-t_start:.2f}')

# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(f'Fraction of duplicates: {1-frac:.2%}')

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'Time to filter dataset: {time.time()-t_start:.2f}')
print(f'Size of filtered dataset: {len(ds_filter)}')

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f'Time to deduplicate dataset: {time.time()-t_start:.2f}')
    print(f'Size of deduplicate dataset: {len(ds_filter)}')

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f'file-{file_number+1:012}.json')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f'Time to save dataset: {time.time()-t_start:.2f}')
| 236
| 1
|
'''simple docstring'''
import os
from collections.abc import Iterator
def lowercase_ ( _lowercase = "." ) -> Iterator[str]:
    """Yield paths of .py/.ipynb source files under ``_lowercase``, pruning
    ``scripts/`` and hidden/private directories and skipping ``__init__.py``.

    Fix: the body read the undefined name ``lowerCamelCase_`` everywhere;
    the pruned list must be assigned back into ``dir_names[:]`` so os.walk
    actually skips those directories.
    """
    for dir_path, dir_names, filenames in os.walk(_lowercase):
        # prune in place so os.walk does not descend into skipped dirs
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip('./')
def lowercase_ ( _lowercase ) -> Tuple:
'''simple docstring'''
return F"""{i * " "}*""" if i else "\n##"
def lowercase_ ( old_path , new_path ) -> str:
    """Print a heading for each path component that changed between the two
    paths, then return ``new_path``.

    Fix: the original signature duplicated the parameter name (a
    SyntaxError) while the body read ``old_path``/``new_path``.
    NOTE(review): ``md_prefix`` is the intended helper but is not defined
    under that name in this file (it was obfuscated to ``lowercase_``).
    """
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"""{md_prefix(i)} {new_part.replace("_", " ").title()}""")
    return new_path
def lowercase_ ( _lowercase = "." ) -> None:
    """Print a Markdown index of all source files below ``_lowercase``.

    Fixes: locals were bound to throwaway names while later lines read
    ``old_path``/``filepath``; the "(unknown)" placeholders left by the
    metadata join are replaced with the actual ``filename``.
    NOTE(review): ``good_file_paths``/``print_path``/``md_prefix`` are the
    intended helpers; in this file their definitions are all obfuscated to
    ``lowercase_`` — confirm the mapping before running.
    """
    old_path = ""
    for filepath in sorted(good_file_paths(_lowercase)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"""{filepath}/{filename}""".replace(' ', '%20')
        filename = os.path.splitext(filename.replace('_', ' ').title())[0]
        print(f"""{md_prefix(indent)} [{filename}]({url})""")
# Script entry point: print the Markdown index for the current directory.
# NOTE(review): `print_directory_md` is not defined under that name in this
# file (the definition above was renamed) — confirm the mapping.
if __name__ == "__main__":
    print_directory_md('''.''')
| 422
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
# Fix: all three constants were bound to the same name ``A`` while the
# download function below reads DEFAULT_PROMPTS_REPO and PROMPT_FILES.
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def __SCREAMING_SNAKE_CASE ( prompt_or_repo_id , agent_name , mode="run" ):
    """Resolve a prompt: return it verbatim if it is inline text (contains
    whitespace), otherwise treat it as a dataset repo id and download the
    prompt template for ``mode``.

    Fix: the original signature duplicated the parameter name three times
    (a SyntaxError) and the body read undefined names.
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name})
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 449
| 0
|
import numpy as np
class UpperCamelCase :
    '''A* grid cell: holds a position, a parent link for path reconstruction,
    and the g/h/f path costs.

    Fix: __init__ bound every value to a throwaway local, so instances never
    got position/parent/g/h/f; __eq__ compared against the undefined name
    ``cell`` instead of its parameter.
    '''

    def __init__( self ):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__( self , cell ):
        # Cells are equal when they occupy the same grid position.
        return self.position == cell.position

    def UpperCamelCase ( self ):
        print(self.position )
class UpperCamelCase :
    '''Rectangular grid world for the A* demo.

    Fix: __init__ bound the grid and its limits to throwaway locals while the
    methods read ``self.w``/``self.world_x_limit``/``self.world_y_limit``;
    the neighbour method never set x/y or built the returned list.
    '''

    def __init__( self , UpperCamelCase_=(5, 5) ):
        self.w = np.zeros(UpperCamelCase_ )
        self.world_x_limit = world_size[0] if False else UpperCamelCase_[0]
        self.world_y_limit = UpperCamelCase_[1]

    def UpperCamelCase ( self ):
        print(self.w )

    def UpperCamelCase ( self , UpperCamelCase_ ):
        # NOTE(review): this second ``UpperCamelCase`` shadows the show method
        # above, and ``Cell`` is not defined under that name in this file —
        # both are obfuscation artifacts; confirm the intended names.
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = UpperCamelCase_.position[0]
        current_y = UpperCamelCase_.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = UpperCamelCase_
                neighbours.append(c)
        return neighbours
def UpperCamelCase ( _a , _a , _a ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ :Any = []
lowercase_ :Any = []
_open.append(_a )
while _open:
lowercase_ :Union[str, Any] = np.argmin([n.f for n in _open] )
lowercase_ :Optional[int] = _open[min_f]
_closed.append(_open.pop(_a ) )
if current == goal:
break
for n in world.get_neigbours(_a ):
for c in _closed:
if c == n:
continue
lowercase_ :int = current.g + 1
lowercase_ , lowercase_ :str = n.position
lowercase_ , lowercase_ :List[str] = goal.position
lowercase_ :List[Any] = (ya - ya) ** 2 + (xa - xa) ** 2
lowercase_ :Optional[int] = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(_a )
lowercase_ :Tuple = []
while current.parent is not None:
path.append(current.position )
lowercase_ :Any = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
    # Fix: every value was bound to the same throwaway name
    # ``SCREAMING_SNAKE_CASE`` while the code reads world/start/goal/s.
    # NOTE(review): Gridworld/Cell/astar are the intended names; the
    # definitions above were obfuscated — confirm the mapping.
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 441
|
import math

# Fix: the three constants were bound to throwaway names while the code reads
# BALLS_PER_COLOUR / NUM_COLOURS / NUM_BALLS (Project Euler 493 setup:
# 7 colours, 10 balls each, 70 balls total).
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours among ``taken`` balls drawn from
    the urn, formatted to nine decimal places.

    Fix: ``math.comb(_a, _a)`` computed C(taken, taken) == 1; the total must
    be C(NUM_BALLS, taken). The function is named ``solution`` to match the
    call site below.
    """
    total = math.comb(NUM_BALLS, taken)
    # Ways to draw ``taken`` balls while completely avoiding one fixed colour.
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
| 441
| 1
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
snake_case__ : List[str] = logging.get_logger(__name__)
class _a ( UpperCAmelCase__ ):
    """Composite configuration pairing a vision-encoder config with a
    text-decoder config (vision-encoder-decoder model).

    Fixes: the two class attributes were both bound to ``A_`` while the code
    reads ``self.model_type``; __init__ read the undefined name ``kwargs``
    and bound all sub-configs to throwaway locals; the classmethod signature
    duplicated its parameter names (a SyntaxError).
    """

    model_type = """vision-encoder-decoder"""
    is_composition = True

    def __init__( self , **_UpperCAmelCase ) -> Dict:
        super().__init__(**_UpperCAmelCase )
        if "encoder" not in _UpperCAmelCase or "decoder" not in _UpperCAmelCase:
            raise ValueError(
                f"""A configuraton of type {self.model_type} cannot be instantiated because """
                f"""not both `encoder` and `decoder` sub-configurations are passed, but only {_UpperCAmelCase}""" )
        encoder_config = _UpperCAmelCase.pop('encoder' )
        encoder_model_type = encoder_config.pop('model_type' )
        decoder_config = _UpperCAmelCase.pop('decoder' )
        decoder_model_type = decoder_config.pop('model_type' )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True

    @classmethod
    def _UpperCAmelCase ( cls , encoder_config , decoder_config , **kwargs ) -> PretrainedConfig:
        """Build the composite config from the two sub-configs, forcing the
        decoder into cross-attention mode."""
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )

    def _UpperCAmelCase ( self ) -> int:
        """Serialize to a plain dict, expanding the nested sub-configs.

        NOTE(review): this method shares the obfuscated name ``_UpperCAmelCase``
        with the classmethod above and therefore shadows it — confirm the
        intended names (from_encoder_decoder_configs / to_dict).
        """
        output = copy.deepcopy(self.__dict__ )
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class _a ( UpperCAmelCase__ ):
    """ONNX export config for the vision-encoder half (pixel inputs only)."""

    # Minimum ONNX opset, as a comparable Version object.
    A_ = version.parse("""1.11""" )

    @property
    def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes of the single image input tensor.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def _UpperCAmelCase ( self ) -> float:
        # Absolute tolerance used when validating exported outputs.
        return 1e-4

    @property
    def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes of the encoder output.
        # NOTE(review): all three properties share the obfuscated name
        # ``_UpperCAmelCase``, so only this last definition survives on the
        # class — confirm the intended names (inputs/atol_for_validation/outputs).
        return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class _a ( UpperCAmelCase__ ):
    """ONNX export config for the text-decoder half.

    Fixes: the input mapping was built into throwaway locals while the code
    returns ``common_inputs``; the dummy-input method duplicated all its
    parameter names (a SyntaxError) and never populated the returned dict.
    """

    @property
    def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs["attention_mask"] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs["encoder_hidden_states"] = {0: 'batch', 1: 'encoder_sequence'}
        return common_inputs

    def _UpperCAmelCase ( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        """Generate dummy decoder inputs plus a zero encoder-hidden-states
        tensor sized from the encoder hidden size."""
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch, encoder_sequence = dummy_input['input_ids'].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop('input_ids' )
        common_inputs["attention_mask"] = dummy_input.pop('attention_mask' )
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
class _a ( UpperCAmelCase__ ):
    """Composite ONNX config: builds per-half ONNX configs for a
    vision-encoder-decoder model.

    Fix: the decoder-config method duplicated its parameter names (a
    SyntaxError) and bound the encoder hidden size to a throwaway local
    instead of propagating it onto the decoder config.
    """

    @property
    def _UpperCAmelCase ( self ) -> None:
        pass

    def _UpperCAmelCase ( self , _UpperCAmelCase ) -> OnnxConfig:
        """Wrap the encoder config in its ONNX export config."""
        return VisionEncoderDecoderEncoderOnnxConfig(_UpperCAmelCase )

    def _UpperCAmelCase ( self , encoder_config , decoder_config , feature = "default" ) -> OnnxConfig:
        """Wrap the decoder config; the decoder must know the encoder's hidden size."""
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
| 23
|
class SCREAMING_SNAKE_CASE :
    """Binary-search-tree node used by tree sort.

    Fixes: __init__ bound the value and children to throwaway locals, so
    instances never got ``val``/``left``/``right``; the insert method
    recursed via the undefined attribute ``insert`` and created children
    with the undefined name ``Node``.
    """

    def __init__( self , __UpperCamelCase ):
        self.val = __UpperCamelCase
        self.left = None
        self.right = None

    def __lowerCAmelCase ( self , __UpperCamelCase ):
        """Insert a value below this node, keeping BST order (equal values
        overwrite in place, as in the upstream original)."""
        if self.val:
            if __UpperCamelCase < self.val:
                if self.left is None:
                    self.left = SCREAMING_SNAKE_CASE(__UpperCamelCase )
                else:
                    self.left.__lowerCAmelCase(__UpperCamelCase )
            elif __UpperCamelCase > self.val:
                if self.right is None:
                    self.right = SCREAMING_SNAKE_CASE(__UpperCamelCase )
                else:
                    self.right.__lowerCAmelCase(__UpperCamelCase )
        else:
            self.val = __UpperCamelCase
def a(root , res ):
    '''In-order traversal: append node values to ``res`` in sorted order.

    Fix: the signature duplicated the parameter name (a SyntaxError) and the
    recursion called the undefined name ``inorder`` instead of this function.
    '''
    if root:
        a(root.left , res )
        res.append(root.val )
        a(root.right , res )
def a(lowercase__ ):
    '''Sort ``lowercase__`` by inserting every element into a BST and reading
    it back with an in-order traversal.

    Fix: locals were bound to throwaway names while later lines read
    ``arr``/``root``/``res``.
    NOTE(review): ``Node``, ``root.insert`` and ``inorder`` are the intended
    helper names, but in this file the class is obfuscated to
    ``SCREAMING_SNAKE_CASE`` and the traversal to ``a`` — confirm the mapping
    before running.
    '''
    if len(lowercase__ ) == 0:
        return lowercase__
    root = Node(lowercase__[0] )
    for i in range(1 , len(lowercase__ ) ):
        root.insert(lowercase__[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 187
| 0
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# Fix: both values were bound to the throwaway name ``UpperCAmelCase_`` while
# the loader functions below read MODEL_TYPE and LOAD_DENSE_INDEX.
MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE_ ( ) -> int:
    """Load the dense-retrieval question encoder and the seq2seq answer model.

    NOTE(review): every result is bound to the throwaway local
    ``UpperCamelCase`` while later lines read ``qar_model``/``sas_model``/
    ``save_dict``/``qar_tokenizer``/``sas_tokenizer``, which are never
    assigned — this function raises NameError as written. The decorator
    argument ``lowerCAmelCase__`` is likewise undefined. Confirm the intended
    locals against the upstream ELI5 demo before fixing.
    """
    if LOAD_DENSE_INDEX:
        UpperCamelCase :Tuple = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
        UpperCamelCase :Dict = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
        UpperCamelCase :List[Any] = qar_model.eval()
    else:
        UpperCamelCase , UpperCamelCase :Union[str, Any] = (None, None)
    if MODEL_TYPE == "bart":
        UpperCamelCase :Dict = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
        UpperCamelCase :List[Any] = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
        UpperCamelCase :int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
        sas_model.load_state_dict(save_dict["""model"""] )
        UpperCamelCase :Optional[Any] = sas_model.eval()
    else:
        UpperCamelCase , UpperCamelCase :List[Any] = make_qa_sas_model(
            model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE_ ( ) -> List[str]:
    """Load the wiki40b passage memmap, its FAISS GPU index, and an
    Elasticsearch client.

    NOTE(review): results are bound to the throwaway local ``UpperCamelCase``
    while later lines read ``wikiaab_passages``/``wikiaab_gpu_index_flat``,
    which are never assigned — NameError as written; the decorator argument
    ``lowerCAmelCase__`` is also undefined.
    """
    if LOAD_DENSE_INDEX:
        UpperCamelCase :Dict = faiss.StandardGpuResources()
        UpperCamelCase :Tuple = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
        UpperCamelCase :Optional[Any] = np.memmap(
            """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
        UpperCamelCase :str = faiss.IndexFlatIP(128 )
        UpperCamelCase :Tuple = faiss.index_cpu_to_gpu(lowerCAmelCase__ , 1 , lowerCAmelCase__ )
        wikiaab_gpu_index_flat.add(lowerCAmelCase__ )  # TODO fix for larger GPU
    else:
        UpperCamelCase , UpperCamelCase :str = (None, None)
    UpperCamelCase :Dict = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE_ ( ) -> Dict:
    """Load the ELI5 training split and a FAISS index over its question
    embeddings.

    NOTE(review): results are bound to the throwaway local ``UpperCamelCase``
    while later lines read ``elia``/``elia_train``/``eli5_train_q_index``,
    which are never assigned — NameError as written; the decorator argument
    ``lowerCAmelCase__`` is also undefined.
    """
    UpperCamelCase :Optional[Any] = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
    UpperCamelCase :Optional[int] = elia["""train_eli5"""]
    UpperCamelCase :Union[str, Any] = np.memmap(
        """eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
    UpperCamelCase :Union[str, Any] = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(lowerCAmelCase__ )
    return (elia_train, eli5_train_q_index)
# Fix: each loader returns a tuple, but the original collapsed every call
# into the same throwaway name; downstream code reads the unpacked names
# (e.g. eli5_train_q_index / elia_train in find-nearest).
# NOTE(review): load_indexes/load_models/load_train_data are the intended
# function names; the definitions above are obfuscated to
# ``SCREAMING_SNAKE_CASE_`` — confirm the mapping before running.
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Any , __magic_name__ : Tuple=10 ) -> Union[str, Any]:
    """Return the training examples most similar to a question, via the
    dense question index.

    NOTE(review): the signature duplicates the parameter name
    ``__magic_name__`` (a SyntaxError), and the body reads the undefined
    names ``question``/``lowerCAmelCase__``/``I``/``nn_examples`` — confirm
    the intended identifiers against the upstream ELI5 demo before fixing.
    """
    UpperCamelCase :Any = embed_questions_for_retrieval([question] , lowerCAmelCase__ , lowerCAmelCase__ )
    UpperCamelCase , UpperCamelCase :Dict = eli5_train_q_index.search(lowerCAmelCase__ , lowerCAmelCase__ )
    UpperCamelCase :List[str] = [elia_train[int(lowerCAmelCase__ )] for i in I[0]]
    return nn_examples
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple , __magic_name__ : List[Any]="wiki40b" , __magic_name__ : str="dense" , __magic_name__ : Optional[Any]=10 ) -> Dict:
    """Retrieve supporting passages for a question (dense or sparse index)
    and build the seq2seq input string.

    NOTE(review): the signature duplicates ``__magic_name__`` four times (a
    SyntaxError) and the body reads undefined names
    (``method``/``hit_lst``/``question_doc``/``support_list``/
    ``lowerCAmelCase__``) — confirm intended identifiers before fixing.
    """
    if source == "none":
        UpperCamelCase , UpperCamelCase :Union[str, Any] = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            UpperCamelCase , UpperCamelCase :List[Any] = query_qa_dense_index(
                lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
        else:
            UpperCamelCase , UpperCamelCase :Optional[int] = query_es_index(
                lowerCAmelCase__ , lowerCAmelCase__ , index_name="""english_wiki40b_snippets_100w""" , n_results=lowerCAmelCase__ , )
    UpperCamelCase :Optional[Any] = [
        (res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
    ]
    UpperCamelCase :int = """question: {} context: {}""".format(lowerCAmelCase__ , lowerCAmelCase__ )
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda __magic_name__ : None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __magic_name__ : None),
    } )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any]=64 , __magic_name__ : Union[str, Any]=256 , __magic_name__ : Optional[Any]=False , __magic_name__ : Tuple=2 , __magic_name__ : Dict=0.95 , __magic_name__ : Union[str, Any]=0.8 ) -> Optional[int]:
    """Generate a long-form answer for a question document with the seq2seq
    model (cached by streamlit; tensors/tokenizers excluded from hashing).

    NOTE(review): the signature duplicates ``__magic_name__`` nine times (a
    SyntaxError), and the body reads the undefined names
    ``lowerCAmelCase__``/``answer``/``support_list`` — confirm intended
    identifiers against the upstream ELI5 demo before fixing.
    """
    with torch.no_grad():
        UpperCamelCase :Union[str, Any] = qa_sas_generate(
            lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , num_answers=1 , num_beams=lowerCAmelCase__ , min_len=lowerCAmelCase__ , max_len=lowerCAmelCase__ , do_sample=lowerCAmelCase__ , temp=lowerCAmelCase__ , top_p=lowerCAmelCase__ , top_k=lowerCAmelCase__ , max_input_length=1024 , device="""cuda:0""" , )[0]
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
UpperCAmelCase_ : Union[str, Any] = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
UpperCAmelCase_ : Tuple = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase_ : str = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCAmelCase_ : Dict = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
UpperCAmelCase_ : Optional[Any] = st.sidebar.checkbox('''Demo options''')
if demo_options:
UpperCAmelCase_ : List[str] = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
UpperCAmelCase_ : List[Any] = action_list.index(action_st)
UpperCAmelCase_ : List[str] = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
UpperCAmelCase_ : Union[str, Any] = show_type == '''Show full text of passages'''
else:
UpperCAmelCase_ : Optional[int] = 3
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : Dict = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
UpperCAmelCase_ : Optional[int] = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
UpperCAmelCase_ : int = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
UpperCAmelCase_ : Dict = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
UpperCAmelCase_ : List[str] = '''wiki40b'''
UpperCAmelCase_ : int = '''dense'''
UpperCAmelCase_ : str = '''beam'''
UpperCAmelCase_ : List[Any] = 2
UpperCAmelCase_ : Union[str, Any] = 64
UpperCAmelCase_ : List[str] = 2_56
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Tuple = None
# Sidebar: optional fine-grained controls for answer generation.
# NOTE(review): throughout this script every assignment target was mangled to
# the single name `UpperCAmelCase_`, while later lines read the intended names
# (`generate_options`, `generate_info`, `sampled`, `min_len`, `max_len`,
# `n_beams`, `top_p`, `temp`, ...).  As written those reads raise NameError at
# runtime -- the original variable names need to be restored.
UpperCAmelCase_ : str = st.sidebar.checkbox('''Generation options''')
if generate_options:
    UpperCAmelCase_ : Dict = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
    st.sidebar.markdown(generate_info)
    # Decoding strategy: deterministic beam search vs. nucleus sampling.
    UpperCAmelCase_ : Optional[Any] = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
    UpperCAmelCase_ : Optional[Any] = st.sidebar.slider(
        '''Minimum generation length''', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
    )
    UpperCAmelCase_ : Any = st.sidebar.slider(
        '''Maximum generation length''', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
    )
    if sampled == "beam":
        UpperCAmelCase_ : int = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        # Sampling branch: nucleus-p and temperature replace the beam width.
        UpperCAmelCase_ : Optional[int] = st.sidebar.slider(
            '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        UpperCAmelCase_ : Optional[int] = st.sidebar.slider(
            '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        UpperCAmelCase_ : Any = None
# start main text
# Canned example questions; the sentinel first entry lets the user type a
# custom query instead.  NOTE(review): the list/selectbox results are also
# assigned to the mangled `UpperCAmelCase_` name while `questions_list` and
# `question_s` are read below -- original names need restoring.
UpperCAmelCase_ : Dict = [
    '''<MY QUESTION>''',
    '''How do people make chocolate?''',
    '''Why do we get a fever when we are sick?''',
    '''How can different animals perceive different colors?''',
    '''What is natural language processing?''',
    '''What\'s the best way to treat a sunburn?''',
    '''What exactly are vitamins ?''',
    '''How does nuclear energy provide electricity?''',
    '''What\'s the difference between viruses and bacteria?''',
    '''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
    '''Why do people like drinking coffee even though it tastes so bad?''',
    '''What happens when wine ages? How does it make the wine taste better?''',
    '''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
    '''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
    '''How does New Zealand have so many large bird predators?''',
]
UpperCAmelCase_ : str = st.selectbox(
    '''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
    questions_list,
    index=1,
)
# Free-text input only when the sentinel option is selected.
if question_s == "<MY QUESTION>":
    UpperCAmelCase_ : Optional[Any] = st.text_input('''Enter your question here:''', '''''')
else:
    UpperCAmelCase_ : Dict = question_s
# Main action: retrieve supporting passages, generate an answer, and show the
# most similar ELI5 training question.
# NOTE(review): as in the rest of the script, results are assigned to the
# mangled `UpperCAmelCase_` while later code reads the intended names
# (`support_list_dense`, `support_list_sparse`, `support_list`,
# `question_doc`, `answer`, `wiki_url`, `sec_titles`, `sec_list`, `sections`,
# `nn_train_list`, `train_exple`, `answers_st`) -- NameError at runtime until
# the original names are restored.
if st.button('''Show me!'''):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Interleave dense and sparse retrieval results, de-duplicated,
            # keeping the top 10 overall.
            UpperCAmelCase_ : List[Any] = make_support(question, source=wiki_source, method='''dense''', n_results=10)
            UpperCAmelCase_ : List[str] = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
            UpperCAmelCase_ : int = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            UpperCAmelCase_ : Union[str, Any] = support_list[:10]
            # Concatenate passages with <P> separators for the seq2seq input.
            UpperCAmelCase_ : str = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
        else:
            UpperCAmelCase_ : int = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        # Generate the long-form answer from question + retrieved document.
        UpperCAmelCase_ : Dict = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == '''sampled'''),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown('''### The model generated answer is:''')
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        # Render each supporting passage with a link to its Wikipedia section.
        st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
        for i, res in enumerate(support_list):
            UpperCAmelCase_ : Tuple = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
            UpperCAmelCase_ : Optional[int] = res[1].strip()
            if sec_titles == "":
                UpperCAmelCase_ : str = '''[{}]({})'''.format(res[0], wiki_url)
            else:
                UpperCAmelCase_ : Dict = sec_titles.split(''' & ''')
                UpperCAmelCase_ : int = ''' & '''.join(
                    ['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
                )
            st.markdown(
                '''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
                )
    if action in [2, 3]:
        # Show the closest ELI5 training question and its top-scored answers.
        UpperCAmelCase_ : Tuple = find_nearest_training(question)
        UpperCAmelCase_ : Optional[Any] = nn_train_list[0]
        st.markdown(
            '''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
        )
        UpperCAmelCase_ : List[str] = [
            '''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
            for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
            if i == 0 or sc > 2
        ]
        st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
# Footer disclaimer shown in the sidebar.
# NOTE(review): assigned to the mangled `UpperCAmelCase_` but read back as
# `disclaimer` on the last line -- NameError until the name is restored.
UpperCAmelCase_ : Optional[Any] = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 706
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
# Module-level logger (target name mangled; conventionally `logger`).
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class _SCREAMING_SNAKE_CASE(Pipeline):
    """Zero-shot image classification pipeline.

    Scores an input image against a caller-supplied list of
    ``candidate_labels`` by formatting one text "hypothesis" per label and
    comparing image/text embeddings (CLIP-style ``logits_per_image``).

    NOTE(review): this rewrite restores the ``Pipeline`` hook names
    (``_sanitize_parameters`` / ``preprocess`` / ``_forward`` /
    ``postprocess``) and distinct local variable names; the previous
    obfuscated revision used duplicate parameter names (a SyntaxError) and
    four methods all named ``_A`` that shadowed one another.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        # Only accept models that expose a zero-shot image-classification
        # head for the active framework (TF or PyTorch).
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Classify `images` against `candidate_labels` (passed via kwargs)."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Both supported options apply at the preprocess stage; forward and
        # postprocess take no extra parameters.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        # Load the image (path / URL / PIL) and tokenize one filled-in
        # hypothesis per candidate label.
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            # A single label yields a scalar; normalize to a list.
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        # Highest score first.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 590
| 0
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
A_ = logging.get_logger(__name__)
# Map of canonical checkpoint names to their hosted config files.
# NOTE(review): both assignments target the mangled name `A_`, so the logger
# above is immediately overwritten by this map -- the original distinct names
# (conventionally `logger` and a `*_PRETRAINED_CONFIG_ARCHIVE_MAP`) need
# restoring.
A_ = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __lowercase(PretrainedConfig):
    r"""Configuration class for UMT5 models.

    Stores the architecture hyper-parameters (vocabulary size, model
    dimensions, attention geometry, activation spec, ...) used to
    instantiate a UMT5 model.

    NOTE(review): this rewrite restores real parameter names (the previous
    obfuscated signature repeated ``__lowerCamelCase`` -- a SyntaxError),
    ``self.*`` attribute assignments (previously all bound to a local named
    ``lowercase``), and the distinct property names that callers expect.
    """

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1E-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # Decoder defaults to a symmetric stack when not given explicitly.
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # Parse "gated-gelu" / "relu" style activation specs.
        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'')
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'

    @property
    def hidden_size(self):
        """Alias for `d_model` (common-config attribute name)."""
        return self.d_model

    @property
    def num_attention_heads(self):
        """Alias for `num_heads` (common-config attribute name)."""
        return self.num_heads

    @property
    def num_hidden_layers(self):
        """Alias for `num_layers` (common-config attribute name)."""
        return self.num_layers
class __lowercase(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for UMT5 (seq2seq, with optional past key/values).

    NOTE(review): restores the property names (`inputs`,
    `default_onnx_opset`, `atol_for_validation`) and in-place dict updates
    that the obfuscated revision collapsed onto single mangled names.
    """

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            # With cached past key/values the decoder consumes one new token
            # and the attention mask spans past + current sequence.
            common_inputs["attention_mask"][1] = 'past_encoder_sequence + sequence'
            common_inputs["decoder_input_ids"] = {0: 'batch'}
            common_inputs["decoder_attention_mask"] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs["decoder_input_ids"] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs["decoder_attention_mask"] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating the exported model's outputs.
        return 5E-4
| 604
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared fixtures: the Lena test image in BGR and in grayscale.
# NOTE(review): the previous revision assigned both to the single mangled
# name `A_` (so `cvtColor(img, ...)` raised NameError at import time) and
# named every test `__UpperCAmelCase` (so pytest collected nothing).  Real
# names are restored; `A_` is kept as a legacy alias for the grayscale image.
img = imread(R"digital_image_processing/image_data/lena_small.jpg")
gray_img = cvtColor(img, COLOR_BGR2GRAY)
A_ = gray_img  # legacy alias


def test_convert_to_negative():
    """convert_to_negative() should flip at least one pixel."""
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    """change_contrast() should return a PIL image of the same size."""
    with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '''<PIL.Image.Image image mode=RGB size=100x100 at''')


def test_gen_gaussian_kernel():
    """A 9x9 Gaussian kernel should be strictly positive everywhere."""
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    """Canny edge detection should mark at least one edge pixel."""
    canny_img = imread('''digital_image_processing/image_data/lena_small.jpg''', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    """Gaussian filtering a natural image should leave no zero pixels."""
    assert gg.gaussian_filter(gray_img, 5, sigma=0.9).all()


def test_convolve_filter():
    """Convolution with a Laplace-like kernel should produce a non-zero result."""
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    # `uinta` is this file's (mangled) import of numpy's uint8 dtype.
    res = conv.img_convolve(gray_img, laplace).astype(uinta)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray_img, 3).any()


def test_sobel_filter():
    """Sobel should yield non-trivial gradient magnitudes and angles."""
    grad, theta = sob.sobel_filter(gray_img)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    """Burkes dithering should produce a non-empty output image."""
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    """Nearest-neighbour resize to 400x200 should produce a non-empty output."""
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = '''digital_image_processing/image_data/lena.jpg'''
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 604
| 1
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class snake_case ( datasets.BeamBasedBuilder ):
    """Minimal Beam-based builder over a flat ``{"content": str}`` feature set.

    NOTE(review): this rewrite restores the ``BeamBasedBuilder`` hook names
    (``_info`` / ``_split_generators`` / ``_build_pcollection``) and distinct
    parameter names; the previous obfuscated revision named every method
    ``snake_case`` (shadowing one another) and repeated the parameter name
    ``UpperCamelCase`` (a SyntaxError).  ``get_test_dummy_examples`` is the
    intended helper defined later in this file.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=None , )

    def _split_generators(self, dl_manager, pipeline):
        # Single TRAIN split fed from the in-memory dummy examples.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        # Materialize the (key, example) tuples into the Beam pipeline.
        return pipeline | "Load Examples" >> beam.Create(examples)
class snake_case ( datasets.BeamBasedBuilder ):
    """Beam builder whose examples carry a nested ``{"a": {"b": [str]}}`` feature.

    NOTE(review): hook names (``_info`` / ``_split_generators`` /
    ``_build_pcollection``) and distinct parameter names restored -- the
    obfuscated revision's identically-named methods shadowed each other and
    its duplicated parameter names were a SyntaxError.
    ``get_test_nested_examples`` is the intended helper defined later in
    this file.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=None , )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        # Feed the prepared example tuples into the Beam pipeline.
        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Return the three (key, {"content": str}) example tuples used by the dummy builder."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]


# Backward-compatible alias for the previous mangled name.
__snake_case = get_test_dummy_examples
def get_test_nested_examples():
    """Return the three nested (key, {"a": {"b": [str]}}) example tuples used by the nested builder."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]


# Backward-compatible alias for the previous mangled name.
__snake_case = get_test_nested_examples
class snake_case ( lowercase ):
    """End-to-end tests for the Beam-based dataset builders above.

    NOTE(review): this class is heavily name-mangled and cannot run as
    written: the base class `lowercase` is undefined (presumably `TestCase`),
    every method is named `snake_case` (so only the last survives), results
    are assigned to `lowerCamelCase_` while later lines read the intended
    names (`expected_num_examples`, `builder`, `dset`, `write_parquet_mock`),
    and the builder classes are referenced as `DummyBeamDataset` /
    `NestedBeamDataset` plus `tmp_cache_dir` as `UpperCamelCase` -- all
    unresolved.  The original identifiers need to be restored before these
    tests can execute; the code is left byte-identical here.
    """
    @require_beam
    def snake_case ( self ):
        """Full download_and_prepare round-trip on the flat dummy builder."""
        lowerCamelCase_ = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            lowerCamelCase_ = DummyBeamDataset(cache_dir=UpperCamelCase , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            # The prepared arrow file and features must exist on disk.
            self.assertTrue(
                os.path.exists(
                    os.path.join(UpperCamelCase , builder.name , "default" , "0.0.0" , f'''{builder.name}-train.arrow''' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            lowerCamelCase_ = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , UpperCamelCase )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , UpperCamelCase )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(UpperCamelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
    @require_beam
    def snake_case ( self ):
        """Sharded (num_shards=2) prepare via a patched WriteToParquet."""
        import apache_beam as beam
        lowerCamelCase_ = beam.io.parquetio.WriteToParquet
        lowerCamelCase_ = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            lowerCamelCase_ = DummyBeamDataset(cache_dir=UpperCamelCase , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                # Force two output shards while delegating to the real writer.
                lowerCamelCase_ = partial(UpperCamelCase , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        UpperCamelCase , builder.name , "default" , "0.0.0" , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        UpperCamelCase , builder.name , "default" , "0.0.0" , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            lowerCamelCase_ = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , UpperCamelCase )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , UpperCamelCase )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
            self.assertTrue(
                os.path.exists(os.path.join(UpperCamelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
    @require_beam
    def snake_case ( self ):
        """Preparing without a beam_runner must raise MissingBeamOptions."""
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            lowerCamelCase_ = DummyBeamDataset(cache_dir=UpperCamelCase )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
    @require_beam
    def snake_case ( self ):
        """Round-trip on the nested-features builder."""
        lowerCamelCase_ = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            lowerCamelCase_ = NestedBeamDataset(cache_dir=UpperCamelCase , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(UpperCamelCase , builder.name , "default" , "0.0.0" , f'''{builder.name}-train.arrow''' ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            lowerCamelCase_ = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , UpperCamelCase )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , UpperCamelCase )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(UpperCamelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
| 704
|
'''simple docstring'''
def __snake_case(number: int, number_of_terms: int) -> str:
    """Return *number*'s multiplication table with *number_of_terms* rows.

    Each line has the form ``"<number> * <i> = <product>"`` for
    ``i = 1 .. number_of_terms``.

    NOTE(review): the previous revision declared both parameters with the
    same mangled name (a SyntaxError) while the body read ``number`` and
    ``number_of_terms``; the real parameter names are restored here.
    """
    return "\n".join(
        F'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1))


# Public name used by the CLI entry point below (and by external callers).
multiplication_table = __snake_case

if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
| 445
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.