code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
from ...processing_utils import ProcessorMixin
class a ( __lowerCamelCase ):
__lowerCAmelCase : int = """WhisperFeatureExtractor"""
__lowerCAmelCase : Optional[int] = """WhisperTokenizer"""
def __init__( self :Dict ,__lowercase :Dict ,__lowercase :Tuple ):
super().__init__(__lowercase ,__lowercase )
snake_case__ : List[str] = self.feature_extractor
snake_case__ : Optional[Any] = False
def __lowerCamelCase ( self :Any ,__lowercase :Any=None ,__lowercase :Any=None ,__lowercase :int=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__lowercase ,language=__lowercase ,no_timestamps=__lowercase )
def __call__( self :List[str] ,*__lowercase :int ,**__lowercase :Optional[int] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowercase ,**__lowercase )
snake_case__ : Dict = kwargs.pop('''audio''' ,__lowercase )
snake_case__ : Tuple = kwargs.pop('''sampling_rate''' ,__lowercase )
snake_case__ : Union[str, Any] = kwargs.pop('''text''' ,__lowercase )
if len(__lowercase ) > 0:
snake_case__ : Any = args[0]
snake_case__ : List[Any] = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
snake_case__ : str = self.feature_extractor(__lowercase ,*__lowercase ,sampling_rate=__lowercase ,**__lowercase )
if text is not None:
snake_case__ : List[str] = self.tokenizer(__lowercase ,**__lowercase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case__ : Optional[Any] = encodings['''input_ids''']
return inputs
def __lowerCamelCase ( self :Dict ,*__lowercase :str ,**__lowercase :int ):
return self.tokenizer.batch_decode(*__lowercase ,**__lowercase )
def __lowerCamelCase ( self :Optional[int] ,*__lowercase :str ,**__lowercase :Tuple ):
return self.tokenizer.decode(*__lowercase ,**__lowercase )
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :str ,__lowercase :Optional[Any]="np" ):
return self.tokenizer.get_prompt_ids(__lowercase ,return_tensors=__lowercase )
| 230 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A__ = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 230 | 1 |
import math
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
snake_case = [True] * n
snake_case = False
snake_case = False
snake_case = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
snake_case = i * 2
while index < n:
snake_case = False
snake_case = index + i
snake_case = [2]
for i in range(3 ,UpperCamelCase_ ,2 ):
if is_prime[i]:
primes.append(UpperCamelCase_ )
return primes
def UpperCAmelCase__ (UpperCamelCase_ = 99_99_66_66_33_33 ):
"""simple docstring"""
snake_case = math.floor(math.sqrt(UpperCamelCase_ ) ) + 1_00
snake_case = prime_sieve(UpperCamelCase_ )
snake_case = 0
snake_case = 0
snake_case = primes[prime_index]
while (last_prime**2) <= limit:
snake_case = primes[prime_index + 1]
snake_case = last_prime**2
snake_case = next_prime**2
# Get numbers divisible by lps(current)
snake_case = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
snake_case = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
snake_case = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
snake_case = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 213 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class A__ :
"""simple docstring"""
def __init__( self , __snake_case ):
snake_case = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
snake_case = len(__snake_case ) - 1
def a_ ( self , __snake_case ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
snake_case = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , __snake_case ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(__snake_case ) , 5 ) == 1
return output_values
def a_ ( self , __snake_case ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
snake_case = self.basis_function(__snake_case )
snake_case = 0.0
snake_case = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def a_ ( self , __snake_case = 0.01 ):
from matplotlib import pyplot as plt # type: ignore
snake_case = [] # x coordinates of points to plot
snake_case = [] # y coordinates of points to plot
snake_case = 0.0
while t <= 1:
snake_case = self.bezier_curve_function(__snake_case )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
snake_case = [i[0] for i in self.list_of_points]
snake_case = [i[1] for i in self.list_of_points]
plt.plot(
__snake_case , __snake_case , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(__snake_case , __snake_case , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 213 | 1 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
A__ : str = '''bert-base-cased'''
A__ : Optional[int] = '''fp16'''
A__ : Optional[int] = '''bf16'''
A__ : Any = [FPaa, BFaa]
@require_fsdp
@require_cuda
class __snake_case ( UpperCamelCase_ ):
def UpperCAmelCase__ ( self : str):
super().setUp()
lowerCAmelCase_ : int = dict(
ACCELERATE_USE_FSDP='''true''' , MASTER_ADDR='''localhost''' , MASTER_PORT='''10999''' , RANK='''0''' , LOCAL_RANK='''0''' , WORLD_SIZE='''1''' , )
def UpperCAmelCase__ ( self : Any):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(A_):
lowerCAmelCase_ : Optional[int] = self.dist_env.copy()
lowerCAmelCase_ : Optional[Any] = F"""{i + 1}"""
lowerCAmelCase_ : List[Any] = strategy
with mockenv_context(**A_):
lowerCAmelCase_ : List[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1))
def UpperCAmelCase__ ( self : Any):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(A_):
lowerCAmelCase_ : Tuple = self.dist_env.copy()
lowerCAmelCase_ : str = prefetch_policy
with mockenv_context(**A_):
lowerCAmelCase_ : List[str] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch)
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1))
def UpperCAmelCase__ ( self : int):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(A_):
lowerCAmelCase_ : Optional[int] = self.dist_env.copy()
lowerCAmelCase_ : str = state_dict_type
with mockenv_context(**A_):
lowerCAmelCase_ : Optional[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1))
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only)
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : str = AutoModel.from_pretrained(A_)
for policy in FSDP_AUTO_WRAP_POLICY:
lowerCAmelCase_ : Tuple = self.dist_env.copy()
lowerCAmelCase_ : List[str] = policy
if policy == "TRANSFORMER_BASED_WRAP":
lowerCAmelCase_ : List[str] = '''BertLayer'''
elif policy == "SIZE_BASED_WRAP":
lowerCAmelCase_ : Optional[int] = '''2000'''
with mockenv_context(**A_):
lowerCAmelCase_ : Tuple = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(A_)
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy)
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)
lowerCAmelCase_ : str = self.dist_env.copy()
lowerCAmelCase_ : List[str] = '''TRANSFORMER_BASED_WRAP'''
lowerCAmelCase_ : Optional[int] = '''T5Layer'''
with mockenv_context(**A_):
lowerCAmelCase_ : List[str] = FullyShardedDataParallelPlugin()
with self.assertRaises(A_) as cm:
fsdp_plugin.set_auto_wrap_policy(A_)
self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception))
lowerCAmelCase_ : Union[str, Any] = self.dist_env.copy()
lowerCAmelCase_ : List[Any] = '''SIZE_BASED_WRAP'''
lowerCAmelCase_ : List[Any] = '''0'''
with mockenv_context(**A_):
lowerCAmelCase_ : str = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(A_)
self.assertIsNone(fsdp_plugin.auto_wrap_policy)
def UpperCAmelCase__ ( self : Optional[Any]):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
lowerCAmelCase_ : Dict = self.dist_env.copy()
lowerCAmelCase_ : List[str] = mp_dtype
with mockenv_context(**A_):
lowerCAmelCase_ : List[Any] = Accelerator()
if mp_dtype == "fp16":
lowerCAmelCase_ : Optional[Any] = torch.floataa
elif mp_dtype == "bf16":
lowerCAmelCase_ : Any = torch.bfloataa
lowerCAmelCase_ : Optional[Any] = MixedPrecision(param_dtype=A_ , reduce_dtype=A_ , buffer_dtype=A_)
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , A_)
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , A_))
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler)
AcceleratorState._reset_state(A_)
def UpperCAmelCase__ ( self : Any):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
lowerCAmelCase_ : Any = self.dist_env.copy()
lowerCAmelCase_ : Union[str, Any] = str(A_).lower()
with mockenv_context(**A_):
lowerCAmelCase_ : str = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=A_))
@require_fsdp
@require_multi_gpu
@slow
class __snake_case ( UpperCamelCase_ ):
def UpperCAmelCase__ ( self : List[Any]):
super().setUp()
lowerCAmelCase_ : Optional[Any] = 0.82
lowerCAmelCase_ : Union[str, Any] = [
'''fsdp_shard_grad_op_transformer_based_wrap''',
'''fsdp_full_shard_transformer_based_wrap''',
]
lowerCAmelCase_ : Union[str, Any] = {
'''multi_gpu_fp16''': 3_2_0_0,
'''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2_0_0_0,
'''fsdp_full_shard_transformer_based_wrap_fp16''': 1_9_0_0,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
lowerCAmelCase_ : Union[str, Any] = 1_6_0
lowerCAmelCase_ : List[Any] = 1_6_0
lowerCAmelCase_ : Dict = inspect.getfile(accelerate.test_utils)
lowerCAmelCase_ : str = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''external_deps'''])
def UpperCAmelCase__ ( self : Optional[int]):
lowerCAmelCase_ : Tuple = os.path.join(self.test_scripts_folder , '''test_performance.py''')
lowerCAmelCase_ : Tuple = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
for config in self.performance_configs:
lowerCAmelCase_ : Union[str, Any] = cmd.copy()
for i, strategy in enumerate(A_):
if strategy.lower() in config:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""")
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''')
else:
cmd_config.append('''--mixed_precision=fp16''')
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''')
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""")
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''')
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''')
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--performance_lower_bound={self.performance_lower_bound}""",
])
with patch_environment(omp_num_threads=1):
execute_subprocess_async(A_ , env=os.environ.copy())
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ : Union[str, Any] = os.path.join(self.test_scripts_folder , '''test_checkpointing.py''')
lowerCAmelCase_ : List[str] = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
'''--use_fsdp''',
'''--mixed_precision=fp16''',
'''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
]
for i, strategy in enumerate(A_):
lowerCAmelCase_ : List[Any] = cmd.copy()
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""")
if strategy != "FULL_SHARD":
continue
lowerCAmelCase_ : int = len(A_)
for state_dict_type in FSDP_STATE_DICT_TYPE:
lowerCAmelCase_ : List[str] = cmd_config[:state_dict_config_index]
cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""")
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
'''--partial_train_epoch=1''',
])
with patch_environment(omp_num_threads=1):
execute_subprocess_async(A_ , env=os.environ.copy())
lowerCAmelCase_ : int = cmd_config[:-1]
lowerCAmelCase_ : List[str] = os.path.join(self.tmpdir , '''epoch_0''')
cmd_config.extend(
[
F"""--resume_from_checkpoint={resume_from_checkpoint}""",
])
with patch_environment(omp_num_threads=1):
execute_subprocess_async(A_ , env=os.environ.copy())
def UpperCAmelCase__ ( self : Dict):
lowerCAmelCase_ : int = os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''')
lowerCAmelCase_ : Dict = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
lowerCAmelCase_ : Tuple = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''])
else:
cmd_config.extend(['''--mixed_precision=no'''])
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''])
for i, strategy in enumerate(A_):
if strategy.lower() in spec:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""")
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''')
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""")
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''')
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''')
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
F"""--n_train={self.n_train}""",
F"""--n_val={self.n_val}""",
])
with patch_environment(omp_num_threads=1):
execute_subprocess_async(A_ , env=os.environ.copy())
| 103 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , snake_case_ = 7_6_8 , ):
"""simple docstring"""
super().__init__()
A_ : Optional[int] = nn.Parameter(torch.zeros(1 , snake_case_ ) )
A_ : Optional[int] = nn.Parameter(torch.ones(1 , snake_case_ ) )
def lowerCamelCase_ ( self , snake_case_ = None , snake_case_ = None , ):
"""simple docstring"""
A_ : str = nn.Parameter(self.mean.to(snake_case_ ).to(snake_case_ ) )
A_ : Optional[int] = nn.Parameter(self.std.to(snake_case_ ).to(snake_case_ ) )
return self
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Tuple = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : List[str] = (embeds * self.std) + self.mean
return embeds | 286 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Optional[Any] = 1_6
_lowercase : int = 3_2
def snake_case_ ( __SCREAMING_SNAKE_CASE : Accelerator , __SCREAMING_SNAKE_CASE : int = 16 ):
"""simple docstring"""
lowercase_ : str = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase_ : str = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__SCREAMING_SNAKE_CASE : Any ):
# max_length=None => use the model max length (it's actually the default)
lowercase_ : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase_ : List[Any] = datasets.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase_ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__SCREAMING_SNAKE_CASE : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase_ : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase_ : List[Any] = 16
elif accelerator.mixed_precision != "no":
lowercase_ : Dict = 8
else:
lowercase_ : Optional[int] = None
return tokenizer.pad(
__SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase_ : Optional[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase : Optional[Any] = mocked_dataloaders # noqa: F811
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __SCREAMING_SNAKE_CASE ) == "1":
lowercase_ : Dict = 2
# New Code #
lowercase_ : Tuple = int(args.gradient_accumulation_steps )
# Initialize accelerator
lowercase_ : Dict = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__SCREAMING_SNAKE_CASE )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase_ : Optional[int] = config['''lr''']
lowercase_ : int = int(config['''num_epochs'''] )
lowercase_ : Union[str, Any] = int(config['''seed'''] )
lowercase_ : Dict = int(config['''batch_size'''] )
lowercase_ : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' )
set_seed(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : List[Any] = get_dataloaders(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase_ : Tuple = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase_ : Any = model.to(accelerator.device )
# Instantiate optimizer
lowercase_ : List[str] = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE )
# Instantiate scheduler
lowercase_ : Tuple = get_linear_schedule_with_warmup(
optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Dict = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(__SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = output.loss
accelerator.backward(__SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase_ : List[str] = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = outputs.logits.argmax(dim=-1 )
lowercase_ , lowercase_ : Dict = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , )
lowercase_ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __SCREAMING_SNAKE_CASE )
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__SCREAMING_SNAKE_CASE , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase_ : int = parser.parse_args()
lowercase_ : List[str] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 264 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
return round(float(moles / volume ) * nfactor )
def snake_case_ ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def snake_case_ ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def snake_case_ ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 | 1 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self :Any ,__lowercase :int ,__lowercase :Union[str, Any]=1_3 ,__lowercase :int=3_0 ,__lowercase :str=2 ,__lowercase :int=3 ,__lowercase :Dict=True ,__lowercase :Tuple=True ,__lowercase :List[Any]=3_2 ,__lowercase :Tuple=2 ,__lowercase :Dict=4 ,__lowercase :int=3_7 ,__lowercase :Dict="gelu" ,__lowercase :Dict=0.1 ,__lowercase :Dict=0.1 ,__lowercase :Optional[int]=1_0 ,__lowercase :Tuple=0.02 ,__lowercase :Optional[int]=3 ,__lowercase :List[Any]=None ,):
snake_case__ : List[str] = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : List[str] = image_size
snake_case__ : Optional[Any] = patch_size
snake_case__ : Optional[int] = num_channels
snake_case__ : Optional[Any] = is_training
snake_case__ : List[str] = use_labels
snake_case__ : Union[str, Any] = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : Any = num_attention_heads
snake_case__ : List[str] = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Tuple = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : Optional[int] = type_sequence_label_size
snake_case__ : List[str] = initializer_range
snake_case__ : str = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ : List[str] = (image_size // patch_size) ** 2
snake_case__ : Optional[Any] = num_patches + 1
def __lowerCamelCase ( self :Dict ):
snake_case__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : int = None
if self.use_labels:
snake_case__ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self :str ):
return ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,)
def __lowerCamelCase ( self :List[Any] ,__lowercase :List[str] ,__lowercase :Any ,__lowercase :Tuple ):
snake_case__ : Dict = TFViTModel(config=__lowerCAmelCase )
snake_case__ : List[Any] = model(__lowerCAmelCase ,training=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
snake_case__ : int = self.image_size // 2
snake_case__ : Union[str, Any] = pixel_values[:, :, :image_size, :image_size]
snake_case__ : Dict = model(__lowerCAmelCase ,interpolate_pos_encoding=__lowerCAmelCase ,training=__lowerCAmelCase )
snake_case__ : List[Any] = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, seq_length, self.hidden_size) )
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :int ,__lowercase :str ,__lowercase :str ):
snake_case__ : Tuple = self.type_sequence_label_size
snake_case__ : List[str] = TFViTForImageClassification(__lowerCAmelCase )
snake_case__ : Dict = model(__lowerCAmelCase ,labels=__lowerCAmelCase ,training=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
snake_case__ : Optional[int] = self.image_size // 2
snake_case__ : List[Any] = pixel_values[:, :, :image_size, :image_size]
snake_case__ : Tuple = model(__lowerCAmelCase ,interpolate_pos_encoding=__lowerCAmelCase ,training=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ : Any = 1
snake_case__ : Dict = TFViTForImageClassification(__lowerCAmelCase )
snake_case__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : Optional[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Tuple = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs
snake_case__ : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
__lowerCAmelCase : int = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
__lowerCAmelCase : int = (
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[str] = False
def __lowerCamelCase ( self :Any ):
snake_case__ : Optional[int] = TFViTModelTester(self )
snake_case__ : List[Any] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=3_7 )
def __lowerCamelCase ( self :Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __lowerCamelCase ( self :Optional[int] ):
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __lowerCamelCase ( self :Any ):
pass
def __lowerCamelCase ( self :List[str] ):
snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[str] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
snake_case__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,tf.keras.layers.Layer ) )
def __lowerCamelCase ( self :Dict ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__lowerCAmelCase )
snake_case__ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def __lowerCamelCase ( self :Any ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def __lowerCamelCase ( self :Dict ):
snake_case__ : int = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__lowerCAmelCase )
def _lowerCAmelCase ( ) -> Any:
"""simple docstring"""
snake_case__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class a ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self :Dict ):
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self :Any ):
snake_case__ : Any = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
snake_case__ : int = self.default_image_processor
snake_case__ : List[str] = prepare_img()
snake_case__ : Optional[Any] = image_processor(images=__lowerCAmelCase ,return_tensors='''tf''' )
# forward pass
snake_case__ : Any = model(**__lowerCAmelCase )
# verify the logits
snake_case__ : List[str] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
snake_case__ : Optional[Any] = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 )
| 230 | """simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class a :
def __init__( self : Union[str, Any] ):
_UpperCAmelCase = {}
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = {}
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : float ):
if nodea not in self.connections:
self.add_node(__lowerCAmelCase )
if nodea not in self.connections:
self.add_node(__lowerCAmelCase )
_UpperCAmelCase = probability
def lowerCAmelCase_ ( self : Optional[Any] ):
return list(self.connections )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ):
_UpperCAmelCase = 0
_UpperCAmelCase = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(lowercase ,lowercase ,lowercase )
_UpperCAmelCase = Counter(graph.get_nodes() )
_UpperCAmelCase = start
for _ in range(lowercase ):
_UpperCAmelCase = graph.transition(lowercase )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 289 | 0 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase : List[Any] = 16
lowerCAmelCase : Union[str, Any] = 32
def A_( A : Accelerator , A : int = 16):
UpperCamelCase = AutoTokenizer.from_pretrained('bert-base-cased')
UpperCamelCase = load_dataset('glue' , 'mrpc')
def tokenize_function(A : str):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A , max_length=A)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase = datasets.map(
A , batched=A , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels')
def collate_fn(A : List[str]):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase = 8
else:
UpperCamelCase = None
return tokenizer.pad(
A , padding='longest' , max_length=A , pad_to_multiple_of=A , return_tensors='pt' , )
# Instantiate dataloaders.
UpperCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=A , collate_fn=A , batch_size=A)
UpperCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=A , collate_fn=A , batch_size=A)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase : int = mocked_dataloaders # noqa: F811
def A_( A : Tuple , A : Optional[Any]):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , A) == "1":
UpperCamelCase = 2
# Initialize accelerator
UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase = config['lr']
UpperCamelCase = int(config['num_epochs'])
UpperCamelCase = int(config['seed'])
UpperCamelCase = int(config['batch_size'])
UpperCamelCase = evaluate.load('glue' , 'mrpc')
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=A)
def inner_training_loop(A : List[Any]):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(A)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=A)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase = model.to(accelerator.device)
# Instantiate optimizer
UpperCamelCase = AdamW(params=model.parameters() , lr=A)
UpperCamelCase , UpperCamelCase = get_dataloaders(A , A)
# Instantiate scheduler
UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=100 , num_training_steps=(len(A) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
A , A , A , A , A)
# Now we train the model
for epoch in range(A):
model.train()
for step, batch in enumerate(A):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
UpperCamelCase = model(**A)
UpperCamelCase = outputs.loss
accelerator.backward(A)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
UpperCamelCase = model(**A)
UpperCamelCase = outputs.logits.argmax(dim=-1)
UpperCamelCase , UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['labels']))
metric.add_batch(
predictions=A , references=A , )
UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A)
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def A_( ):
UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script.')
parser.add_argument(
'--mixed_precision' , type=A , default=A , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.')
UpperCamelCase = parser.parse_args()
UpperCamelCase = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(A , A)
if __name__ == "__main__":
main()
| 251 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowerCAmelCase : Any = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : Tuple = 'MobileNetV1Config'
# Base docstring
lowerCAmelCase : Dict = 'google/mobilenet_v1_1.0_224'
lowerCAmelCase : Any = [1, 10_24, 7, 7]
# Image classification docstring
lowerCAmelCase : Optional[Any] = 'google/mobilenet_v1_1.0_224'
lowerCAmelCase : List[str] = 'tabby, tabby cat'
lowerCAmelCase : str = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def A_( A : Union[str, Any] , A : Optional[Any] , A : Optional[Any]=None):
UpperCamelCase = {}
if isinstance(A , A):
UpperCamelCase = model.mobilenet_va
else:
UpperCamelCase = model
UpperCamelCase = 'MobilenetV1/Conv2d_0/'
UpperCamelCase = backbone.conv_stem.convolution.weight
UpperCamelCase = backbone.conv_stem.normalization.bias
UpperCamelCase = backbone.conv_stem.normalization.weight
UpperCamelCase = backbone.conv_stem.normalization.running_mean
UpperCamelCase = backbone.conv_stem.normalization.running_var
for i in range(13):
UpperCamelCase = i + 1
UpperCamelCase = i * 2
UpperCamelCase = backbone.layer[pt_index]
UpperCamelCase = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
UpperCamelCase = pointer.convolution.weight
UpperCamelCase = pointer.normalization.bias
UpperCamelCase = pointer.normalization.weight
UpperCamelCase = pointer.normalization.running_mean
UpperCamelCase = pointer.normalization.running_var
UpperCamelCase = backbone.layer[pt_index + 1]
UpperCamelCase = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
UpperCamelCase = pointer.convolution.weight
UpperCamelCase = pointer.normalization.bias
UpperCamelCase = pointer.normalization.weight
UpperCamelCase = pointer.normalization.running_mean
UpperCamelCase = pointer.normalization.running_var
if isinstance(A , A):
UpperCamelCase = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
UpperCamelCase = model.classifier.weight
UpperCamelCase = model.classifier.bias
return tf_to_pt_map
def A_( A : int , A : str , A : Optional[int]):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.')
raise
# Load weights from TF model
UpperCamelCase = tf.train.list_variables(A)
UpperCamelCase = {}
for name, shape in init_vars:
logger.info(f'''Loading TF weight {name} with shape {shape}''')
UpperCamelCase = tf.train.load_variable(A , A)
UpperCamelCase = array
# Build TF to PyTorch weights loading map
UpperCamelCase = _build_tf_to_pytorch_map(A , A , A)
for name, pointer in tf_to_pt_map.items():
logger.info(f'''Importing {name}''')
if name not in tf_weights:
logger.info(f'''{name} not in tf pre-trained weights, skipping''')
continue
UpperCamelCase = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise')
UpperCamelCase = np.transpose(A , (2, 3, 0, 1))
elif "weights" in name:
logger.info('Transposing')
if len(pointer.shape) == 2: # copying into linear layer
UpperCamelCase = array.squeeze().transpose()
else:
UpperCamelCase = np.transpose(A , (3, 2, 0, 1))
if pointer.shape != array.shape:
raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''')
logger.info(f'''Initialize PyTorch weight {name} {array.shape}''')
UpperCamelCase = torch.from_numpy(A)
tf_weights.pop(A , A)
tf_weights.pop(name + '/RMSProp' , A)
tf_weights.pop(name + '/RMSProp_1' , A)
tf_weights.pop(name + '/ExponentialMovingAverage' , A)
logger.info(f'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys())}''')
return model
def A_( A : torch.Tensor , A : nn.Convad):
UpperCamelCase , UpperCamelCase = features.shape[-2:]
UpperCamelCase , UpperCamelCase = conv_layer.stride
UpperCamelCase , UpperCamelCase = conv_layer.kernel_size
if in_height % stride_height == 0:
UpperCamelCase = max(kernel_height - stride_height , 0)
else:
UpperCamelCase = max(kernel_height - (in_height % stride_height) , 0)
if in_width % stride_width == 0:
UpperCamelCase = max(kernel_width - stride_width , 0)
else:
UpperCamelCase = max(kernel_width - (in_width % stride_width) , 0)
UpperCamelCase = pad_along_width // 2
UpperCamelCase = pad_along_width - pad_left
UpperCamelCase = pad_along_height // 2
UpperCamelCase = pad_along_height - pad_top
UpperCamelCase = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(A , A , 'constant' , 0.0)
class SCREAMING_SNAKE_CASE__ ( nn.Module):
def __init__( self , A_ , A_ , A_ , A_ , A_ = 1 , A_ = 1 , A_ = False , A_ = True , A_ = True , )-> None:
'''simple docstring'''
super().__init__()
UpperCamelCase = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
UpperCamelCase = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
UpperCamelCase = nn.Convad(
in_channels=A_ , out_channels=A_ , kernel_size=A_ , stride=A_ , padding=A_ , groups=A_ , bias=A_ , padding_mode='zeros' , )
if use_normalization:
UpperCamelCase = nn.BatchNormad(
num_features=A_ , eps=config.layer_norm_eps , momentum=0.9_997 , affine=A_ , track_running_stats=A_ , )
else:
UpperCamelCase = None
if use_activation:
if isinstance(A_ , A_ ):
UpperCamelCase = ACTaFN[use_activation]
elif isinstance(config.hidden_act , A_ ):
UpperCamelCase = ACTaFN[config.hidden_act]
else:
UpperCamelCase = config.hidden_act
else:
UpperCamelCase = None
def UpperCAmelCase_ ( self , A_ )-> torch.Tensor:
'''simple docstring'''
if self.config.tf_padding:
UpperCamelCase = apply_tf_padding(A_ , self.convolution )
UpperCamelCase = self.convolution(A_ )
if self.normalization is not None:
UpperCamelCase = self.normalization(A_ )
if self.activation is not None:
UpperCamelCase = self.activation(A_ )
return features
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = MobileNetVaConfig
lowerCAmelCase_ = load_tf_weights_in_mobilenet_va
lowerCAmelCase_ = """mobilenet_v1"""
lowerCAmelCase_ = """pixel_values"""
lowerCAmelCase_ = False
def UpperCAmelCase_ ( self , A_ )-> None:
'''simple docstring'''
if isinstance(A_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(A_ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
lowerCAmelCase : Union[str, Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase : Union[str, Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ , A_ = True )-> Union[str, Any]:
'''simple docstring'''
super().__init__(A_ )
UpperCamelCase = config
UpperCamelCase = 32
UpperCamelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
UpperCamelCase = MobileNetVaConvLayer(
A_ , in_channels=config.num_channels , out_channels=A_ , kernel_size=3 , stride=2 , )
UpperCamelCase = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
UpperCamelCase = nn.ModuleList()
for i in range(13 ):
UpperCamelCase = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
UpperCamelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
A_ , in_channels=A_ , out_channels=A_ , kernel_size=3 , stride=strides[i] , groups=A_ , ) )
self.layer.append(
MobileNetVaConvLayer(
A_ , in_channels=A_ , out_channels=A_ , kernel_size=1 , ) )
UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCAmelCase_ ( self , A_ )-> Tuple:
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase_ ( self , A_ = None , A_ = None , A_ = None , )-> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
UpperCamelCase = self.conv_stem(A_ )
UpperCamelCase = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
UpperCamelCase = layer_module(A_ )
if output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = hidden_states
if self.pooler is not None:
UpperCamelCase = torch.flatten(self.pooler(A_ ) , start_dim=1 )
else:
UpperCamelCase = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=A_ , )
@add_start_docstrings(
"""
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ )-> None:
'''simple docstring'''
super().__init__(A_ )
UpperCamelCase = config.num_labels
UpperCamelCase = MobileNetVaModel(A_ )
UpperCamelCase = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
UpperCamelCase = nn.Dropout(config.classifier_dropout_prob , inplace=A_ )
UpperCamelCase = nn.Linear(A_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase_ ( self , A_ = None , A_ = None , A_ = None , A_ = None , )-> Union[tuple, ImageClassifierOutputWithNoAttention]:
'''simple docstring'''
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.mobilenet_va(A_ , output_hidden_states=A_ , return_dict=A_ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier(self.dropout(A_ ) )
UpperCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCamelCase = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCamelCase = 'single_label_classification'
else:
UpperCamelCase = 'multi_label_classification'
if self.config.problem_type == "regression":
UpperCamelCase = MSELoss()
if self.num_labels == 1:
UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCamelCase = loss_fct(A_ , A_ )
elif self.config.problem_type == "single_label_classification":
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCamelCase = BCEWithLogitsLoss()
UpperCamelCase = loss_fct(A_ , A_ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=A_ , logits=A_ , hidden_states=outputs.hidden_states , )
| 251 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowercase = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
__lowercase = cvtColor(img, COLOR_BGR2GRAY)
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :List[str] = cn.convert_to_negative(_SCREAMING_SNAKE_CASE )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCamelCase ( ):
'''simple docstring'''
with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_SCREAMING_SNAKE_CASE , 110 ) ).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''' )
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :List[str] = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :List[str] = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
__UpperCamelCase :Union[str, Any] = canny.canny(_SCREAMING_SNAKE_CASE )
# assert canny array for at least one True
assert canny_array.any()
def lowerCamelCase ( ):
'''simple docstring'''
assert gg.gaussian_filter(_SCREAMING_SNAKE_CASE , 5 , sigma=0.9 ).all()
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
__UpperCamelCase :Union[str, Any] = conv.img_convolve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).astype(_SCREAMING_SNAKE_CASE )
assert res.any()
def lowerCamelCase ( ):
'''simple docstring'''
assert med.median_filter(_SCREAMING_SNAKE_CASE , 3 ).any()
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase :Optional[Any] = sob.sobel_filter(_SCREAMING_SNAKE_CASE )
assert grad.any() and theta.any()
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :Optional[Any] = sp.make_sepia(_SCREAMING_SNAKE_CASE , 20 )
assert sepia.all()
def lowerCamelCase ( SCREAMING_SNAKE_CASE = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
__UpperCamelCase :Union[str, Any] = bs.Burkes(imread(_SCREAMING_SNAKE_CASE , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCamelCase ( SCREAMING_SNAKE_CASE = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
__UpperCamelCase :List[Any] = rs.NearestNeighbour(imread(_SCREAMING_SNAKE_CASE , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :str = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
__UpperCamelCase :List[Any] = imread(_SCREAMING_SNAKE_CASE , 0 )
# Test for get_neighbors_pixel function() return not None
__UpperCamelCase :Any = 0
__UpperCamelCase :List[str] = 0
__UpperCamelCase :Dict = image[x_coordinate][y_coordinate]
__UpperCamelCase :Optional[int] = lbp.get_neighbors_pixel(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__UpperCamelCase :Dict = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
__UpperCamelCase :Optional[Any] = lbp.local_binary_value(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert lbp_image.any()
| 43 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
_UpperCAmelCase = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
_UpperCAmelCase = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )]
# Reverse whole list
_UpperCAmelCase = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
if __name__ == "__main__":
__A : List[str] = input("Enter numbers separated by a comma:\n").strip()
__A : List[Any] = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 260 | 0 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
_SCREAMING_SNAKE_CASE = (
"""This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
'''simple docstring'''
warnings.warn(UpperCamelCase_ , UpperCamelCase_ )
requires_backends(UpperCamelCase_ , """sklearn""" )
return (preds == labels).mean()
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
warnings.warn(UpperCamelCase_ , UpperCamelCase_ )
requires_backends(UpperCamelCase_ , """sklearn""" )
UpperCamelCase = simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase = fa_score(y_true=UpperCamelCase_ , y_pred=UpperCamelCase_ )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
'''simple docstring'''
warnings.warn(UpperCamelCase_ , UpperCamelCase_ )
requires_backends(UpperCamelCase_ , """sklearn""" )
UpperCamelCase = pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0]
UpperCamelCase = spearmanr(UpperCamelCase_ , UpperCamelCase_ )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
'''simple docstring'''
warnings.warn(UpperCamelCase_ , UpperCamelCase_ )
requires_backends(UpperCamelCase_ , """sklearn""" )
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ), f"""Predictions and labels have mismatched lengths {len(UpperCamelCase_ )} and {len(UpperCamelCase_ )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
elif task_name == "mrpc":
return acc_and_fa(UpperCamelCase_ , UpperCamelCase_ )
elif task_name == "sts-b":
return pearson_and_spearman(UpperCamelCase_ , UpperCamelCase_ )
elif task_name == "qqp":
return acc_and_fa(UpperCamelCase_ , UpperCamelCase_ )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
elif task_name == "qnli":
return {"acc": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
elif task_name == "rte":
return {"acc": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
elif task_name == "wnli":
return {"acc": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
elif task_name == "hans":
return {"acc": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
else:
raise KeyError(UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str:
'''simple docstring'''
warnings.warn(UpperCamelCase_ , UpperCamelCase_ )
requires_backends(UpperCamelCase_ , """sklearn""" )
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(UpperCamelCase_ )} and {len(UpperCamelCase_ )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
else:
raise KeyError(UpperCamelCase_ )
| 364 | from math import pi
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> float:
'''simple docstring'''
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
| 165 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCAmelCase_ = "convbert"
def __init__(self , UpperCAmelCase=30522 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-1_2 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=2 , UpperCAmelCase=768 , UpperCAmelCase=2 , UpperCAmelCase=9 , UpperCAmelCase=1 , UpperCAmelCase=None , **UpperCAmelCase , ) -> Tuple:
super().__init__(
pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase , )
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = embedding_size
_snake_case = head_ratio
_snake_case = conv_kernel_size
_snake_case = num_groups
_snake_case = classifier_dropout
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
@property
def lowercase (self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_snake_case = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_snake_case = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] ) | 341 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCAmelCase = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 341 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
__a = flax_key_tuple[:-1] + ('''weight''',)
__a = torch.permute(_UpperCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCAmelCase ):
# linear layer
__a = flax_key_tuple[:-1] + ('''weight''',)
__a = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__a = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if "metadata" in layer:
__a = layer.split('''metadata''' )
__a = ''''''.join(split_layer[0] )[:-1]
__a = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
__a = layer.split('''kvstore''' )
__a = ''''''.join(split_layer[0] )[:-1]
__a = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
__a = layer.split('''/''' )
__a = '''/'''.join(split_layer[:-1] )
__a = (split_layer[-1],)
if "kvstore/path" in layer:
__a = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
__a = '''file'''
else:
__a = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = rename_keys(_UpperCAmelCase )
__a = {}
for k, v in current_block.items():
__a = v
__a = new_current_block
torch.save(_UpperCAmelCase , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = WEIGHTS_NAME ):
__a = convert_file_size_to_int(_UpperCAmelCase )
__a = []
__a = {}
__a = 0
__a = 0
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
__a = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
__a = flatten_dict(_UpperCAmelCase , sep='''/''' )
__a = {}
for layer in checkpoint_info.keys():
__a , __a , __a = get_key_and_tensorstore_dict(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if curr_real_layer_name in all_layers:
__a = content
else:
__a = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
__a = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
__a = torch.tensor(_UpperCAmelCase )
__a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
__a , __a = rename_base_flax_keys(tuple(key.split('''/''' ) ) , _UpperCAmelCase )
__a = '''/'''.join(_UpperCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
__a = os.path.join(
_UpperCAmelCase , weights_name.replace('''.bin''' , f'-{len(_UpperCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_UpperCAmelCase , _UpperCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
__a = {}
__a = 0
__a = raw_weights.to(getattr(_UpperCAmelCase , _UpperCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
__a = os.path.join(_UpperCAmelCase , weights_name.replace('''.bin''' , f'-{len(_UpperCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_UpperCAmelCase , _UpperCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_UpperCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
__a = {}
__a = {}
for idx, shard in enumerate(_UpperCAmelCase ):
__a = weights_name.replace(
'''.bin''' , f'-{idx+1:05d}-of-{len(_UpperCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
__a = os.path.join(_UpperCAmelCase , weights_name.replace('''.bin''' , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
__a = shard
for key in shard:
__a = shard_file
# Add the metadata
__a = {'''total_size''': total_size}
__a = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , '''w''' , encoding='''utf-8''' ) as f:
__a = json.dumps(_UpperCAmelCase , indent=2 , sort_keys=_UpperCAmelCase ) + '''\n'''
f.write(_UpperCAmelCase )
return metadata, index
if __name__ == "__main__":
__snake_case :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
__snake_case :Optional[Any] = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __snake_case ( ):
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
__a = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
__a = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
__a = TaTokenizer.from_pretrained('''t5-small''' )
__a = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
__a = tokenizer(_UpperCAmelCase , return_tensors='''pt''' ).input_ids
__a = model.generate(_UpperCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 354 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _A :
UpperCamelCase__ : int
UpperCamelCase__ : TreeNode | None = None
UpperCamelCase__ : TreeNode | None = None
__snake_case :Optional[Any] = namedtuple('''CoinsDistribResult''', '''moves excess''')
def __snake_case ( _UpperCAmelCase ):
if root is None:
return 0
# Validation
def count_nodes(_UpperCAmelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_UpperCAmelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_UpperCAmelCase ) != count_coins(_UpperCAmelCase ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
def get_distrib(_UpperCAmelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
__a , __a = get_distrib(node.left )
__a , __a = get_distrib(node.right )
__a = 1 - left_distrib_excess
__a = 1 - right_distrib_excess
__a = (
left_distrib_moves
+ right_distrib_moves
+ abs(_UpperCAmelCase )
+ abs(_UpperCAmelCase )
)
__a = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_UpperCAmelCase , _UpperCAmelCase )
return get_distrib(_UpperCAmelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 131 | 0 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def UpperCamelCase_ ( _UpperCAmelCase : int = 3 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError("number of qubits must be a integer." )
if number_of_qubits <= 0:
raise ValueError("number of qubits must be > 0." )
if math.floor(_UpperCAmelCase ) != number_of_qubits:
raise ValueError("number of qubits must be exact integer." )
if number_of_qubits > 10:
raise ValueError("number of qubits too large to simulate(>10)." )
_UpperCAmelCase : Dict = QuantumRegister(_UpperCAmelCase , "qr" )
_UpperCAmelCase : Any = ClassicalRegister(_UpperCAmelCase , "cr" )
_UpperCAmelCase : int = QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase : List[Any] = number_of_qubits
for i in range(_UpperCAmelCase ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_UpperCAmelCase ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _UpperCAmelCase , _UpperCAmelCase )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_UpperCAmelCase , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_UpperCAmelCase , _UpperCAmelCase )
# simulate with 10000 shots
_UpperCAmelCase : str = Aer.get_backend("qasm_simulator" )
_UpperCAmelCase : Dict = execute(_UpperCAmelCase , _UpperCAmelCase , shots=10_000 )
return job.result().get_counts(_UpperCAmelCase )
if __name__ == "__main__":
print(
F'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
| 31 | '''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
def UpperCamelCase_ ( _UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : int = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_UpperCAmelCase : List[Any] = MaskFormerConfig(backbone_config=_UpperCAmelCase )
_UpperCAmelCase : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_UpperCAmelCase : Dict = 847
_UpperCAmelCase : Any = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_UpperCAmelCase : Any = 150
_UpperCAmelCase : Any = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_UpperCAmelCase : Tuple = 171
_UpperCAmelCase : Union[str, Any] = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_UpperCAmelCase : Any = 133
_UpperCAmelCase : int = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_UpperCAmelCase : Optional[int] = 19
_UpperCAmelCase : str = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_UpperCAmelCase : Optional[int] = 65
_UpperCAmelCase : Tuple = "mapillary-vistas-id2label.json"
_UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase : Tuple = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
return config
def UpperCamelCase_ ( _UpperCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def UpperCamelCase_ ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = dct.pop(_UpperCAmelCase )
_UpperCAmelCase : List[str] = val
def UpperCamelCase_ ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : List[str] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_UpperCAmelCase : Optional[int] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_UpperCAmelCase : Any = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_UpperCAmelCase : Optional[int] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : List[str] = in_proj_weight[:dim, :]
_UpperCAmelCase : Tuple = in_proj_bias[: dim]
_UpperCAmelCase : List[Any] = in_proj_weight[
dim : dim * 2, :
]
_UpperCAmelCase : List[str] = in_proj_bias[
dim : dim * 2
]
_UpperCAmelCase : Optional[Any] = in_proj_weight[
-dim :, :
]
_UpperCAmelCase : Dict = in_proj_bias[-dim :]
# fmt: on
def UpperCamelCase_ ( _UpperCAmelCase : Dict , _UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_UpperCAmelCase : Dict = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_UpperCAmelCase : Dict = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : int = in_proj_weight[: hidden_size, :]
_UpperCAmelCase : Union[str, Any] = in_proj_bias[:config.hidden_size]
_UpperCAmelCase : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
_UpperCAmelCase : List[str] = in_proj_bias[hidden_size : hidden_size * 2]
_UpperCAmelCase : int = in_proj_weight[-hidden_size :, :]
_UpperCAmelCase : Optional[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_UpperCAmelCase : Optional[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_UpperCAmelCase : Tuple = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : Any = in_proj_weight[: hidden_size, :]
_UpperCAmelCase : Tuple = in_proj_bias[:config.hidden_size]
_UpperCAmelCase : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
_UpperCAmelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_UpperCAmelCase : Optional[int] = in_proj_weight[-hidden_size :, :]
_UpperCAmelCase : Union[str, Any] = in_proj_bias[-hidden_size :]
# fmt: on
def UpperCamelCase_ ( ) -> torch.Tensor:
"""simple docstring"""
_UpperCAmelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : Any = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase_ ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = get_maskformer_config(_UpperCAmelCase )
# load original state_dict
with open(_UpperCAmelCase , "rb" ) as f:
_UpperCAmelCase : Optional[int] = pickle.load(_UpperCAmelCase )
_UpperCAmelCase : Optional[int] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_UpperCAmelCase : Any = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_swin_q_k_v(_UpperCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_UpperCAmelCase , _UpperCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
_UpperCAmelCase : Tuple = torch.from_numpy(_UpperCAmelCase )
# load 🤗 model
_UpperCAmelCase : Union[str, Any] = MaskFormerForInstanceSegmentation(_UpperCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(_UpperCAmelCase , param.shape )
_UpperCAmelCase , _UpperCAmelCase : Any = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_UpperCAmelCase ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_UpperCAmelCase : Optional[int] = prepare_img()
if "vistas" in model_name:
_UpperCAmelCase : int = 65
elif "cityscapes" in model_name:
_UpperCAmelCase : Tuple = 65_535
else:
_UpperCAmelCase : Any = 255
_UpperCAmelCase : Optional[Any] = True if "ade" in model_name else False
_UpperCAmelCase : Optional[int] = MaskFormerImageProcessor(ignore_index=_UpperCAmelCase , reduce_labels=_UpperCAmelCase )
_UpperCAmelCase : Optional[int] = image_processor(_UpperCAmelCase , return_tensors="pt" )
_UpperCAmelCase : List[Any] = model(**_UpperCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_UpperCAmelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 31 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCamelCase_ (_a ):
__magic_name__ = ['''pixel_values''']
def __init__( self : List[str] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 255 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Dict , ) -> List[str]:
super().__init__(**snake_case_ )
UpperCAmelCase_ : List[Any] = size if size is not None else {"""shortest_edge""": 224}
UpperCAmelCase_ : int = get_size_dict(snake_case_ , default_to_square=snake_case_ )
UpperCAmelCase_ : Dict = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
UpperCAmelCase_ : Optional[Any] = get_size_dict(snake_case_ , default_to_square=snake_case_ , param_name="crop_size" )
UpperCAmelCase_ : str = do_resize
UpperCAmelCase_ : Optional[int] = size
UpperCAmelCase_ : Optional[int] = resample
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : List[str] = crop_size
UpperCAmelCase_ : List[Any] = do_rescale
UpperCAmelCase_ : Dict = rescale_factor
UpperCAmelCase_ : Union[str, Any] = do_normalize
UpperCAmelCase_ : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_ : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_ : Tuple = do_convert_rgb
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[str] , ) -> Optional[Any]:
UpperCAmelCase_ : Optional[int] = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCAmelCase_ : Dict = get_resize_output_image_size(snake_case_ , size=size["shortest_edge"] , default_to_square=snake_case_ )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : str , ) -> Optional[Any]:
UpperCAmelCase_ : str = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(snake_case_ , size=(size["height"], size["width"]) , data_format=snake_case_ , **snake_case_ )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[Any] , ) -> Optional[int]:
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> Tuple:
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : float = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Tuple , ) -> Optional[int]:
UpperCAmelCase_ : str = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : Optional[Any] = size if size is not None else self.size
UpperCAmelCase_ : List[Any] = get_size_dict(snake_case_ , param_name="size" , default_to_square=snake_case_ )
UpperCAmelCase_ : int = resample if resample is not None else self.resample
UpperCAmelCase_ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Any = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : str = get_size_dict(snake_case_ , param_name="crop_size" , default_to_square=snake_case_ )
UpperCAmelCase_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : Tuple = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : Any = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ : Union[str, Any] = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ : Tuple = [convert_to_rgb(snake_case_ ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ : int = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
UpperCAmelCase_ : Optional[int] = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_center_crop:
UpperCAmelCase_ : str = [self.center_crop(image=snake_case_ , size=snake_case_ ) for image in images]
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
if do_normalize:
UpperCAmelCase_ : List[Any] = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
UpperCAmelCase_ : str = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
UpperCAmelCase_ : Union[str, Any] = {"""pixel_values""": images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
| 371 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class UpperCamelCase_ (unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = TFAutoModel.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = AutoModel.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = AutoModelForPreTraining.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : str = TFAutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = AutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = AutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = AutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : Tuple = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : int = AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
UpperCAmelCase_ : str = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
UpperCAmelCase_ : Optional[Any] = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
UpperCAmelCase_ : str = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
| 253 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase)
| 11 |
import os
import string
import sys
lowerCamelCase = 1 << 8
lowerCamelCase = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
lowerCamelCase = KEYMAP['''up''']
lowerCamelCase = KEYMAP['''left''']
if sys.platform == "win32":
lowerCamelCase = []
lowerCamelCase = {
b'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
b'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
lowerCamelCase = ord(str(i))
def UpperCAmelCase__ ( ):
'''simple docstring'''
if os.name == "nt":
import msvcrt
a__ ='''mbcs'''
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(_A ) == 0:
# Read the keystroke
a__ =msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
a__ =ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
a__ =chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) )
WIN_CH_BUFFER.append(_A )
if ord(_A ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_26 ) )
a__ =chr(KEYMAP['''esc'''] )
except KeyError:
a__ =cha[1]
else:
a__ =ch.decode(_A )
else:
a__ =WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
a__ =sys.stdin.fileno()
a__ =termios.tcgetattr(_A )
try:
tty.setraw(_A )
a__ =sys.stdin.read(1 )
finally:
termios.tcsetattr(_A , termios.TCSADRAIN , _A )
return ch
def UpperCAmelCase__ ( ):
'''simple docstring'''
a__ =get_raw_chars()
if ord(_A ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_A ) == KEYMAP["esc"]:
a__ =get_raw_chars()
if ord(_A ) == KEYMAP["mod_int"]:
a__ =get_raw_chars()
if ord(_A ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_A ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_A ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 188 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__snake_case )
class a__ ( __snake_case ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
A__ : str = field(default='summarization' , metadata={'include_in_asdict_even_if_is_default': True} )
A__ : ClassVar[Features] = Features({'text': Value('string' )} )
A__ : ClassVar[Features] = Features({'summary': Value('string' )} )
A__ : str = "text"
A__ : str = "summary"
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 197 | import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class a__ ( __snake_case ):
def __init__( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=None , **UpperCAmelCase ) -> List[Any]:
__a = parent
__a = config_class
__a = has_text_modality
__a = kwargs
__a = common_properties
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
__a = self.config_class(**self.inputs_dict )
__a = (
['hidden_size', 'num_attention_heads', 'num_hidden_layers']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['vocab_size'] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(UpperCAmelCase , UpperCAmelCase ) , msg=f'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(UpperCAmelCase ):
try:
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.parent.assertEqual(
getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase , msg=f'''`{name} value {idx} expected, but was {getattr(UpperCAmelCase , UpperCAmelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(UpperCAmelCase ):
try:
__a = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase , msg=f'''`{name} value {idx} expected, but was {getattr(UpperCAmelCase , UpperCAmelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = self.config_class(**self.inputs_dict )
__a = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a = os.path.join(UpperCAmelCase , 'config.json' )
config_first.to_json_file(UpperCAmelCase )
__a = self.config_class.from_json_file(UpperCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(UpperCAmelCase )
__a = self.config_class.from_pretrained(UpperCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
__a = self.config_class(**self.inputs_dict )
__a = 'test'
with tempfile.TemporaryDirectory() as tmpdirname:
__a = os.path.join(UpperCAmelCase , UpperCAmelCase )
config_first.save_pretrained(UpperCAmelCase )
__a = self.config_class.from_pretrained(UpperCAmelCase , subfolder=UpperCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
__a = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
__a = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
if self.config_class.is_composition:
return
__a = self.config_class()
self.parent.assertIsNotNone(UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
__a = copy.deepcopy(UpperCAmelCase )
__a = self.config_class(**UpperCAmelCase )
__a = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) )
elif getattr(UpperCAmelCase , UpperCAmelCase ) != value:
wrong_values.append((key, getattr(UpperCAmelCase , UpperCAmelCase ), value) )
if len(UpperCAmelCase ) > 0:
__a = '\n'.join([f'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(f'''The following keys were not properly set in the config:\n{errors}''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 197 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__a : List[str] = None
__a : int = logging.get_logger(__name__)
__a : Tuple = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
__a : Dict = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
__a : Any = {
"""google/rembert""": 2_5_6,
}
__a : Tuple = """▁"""
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Any = VOCAB_FILES_NAMES
__a : Dict = PRETRAINED_VOCAB_FILES_MAP
__a : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : List[str] = RemBertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , **lowerCAmelCase__ , ) -> List[str]:
'''simple docstring'''
__lowercase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
__lowercase = do_lower_case
__lowercase = remove_space
__lowercase = keep_accents
__lowercase = vocab_file
__lowercase = False if not self.vocab_file else True
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCAmelCase__ ) )
return
__lowercase = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,) | 210 | import gc
import threading
import time
import psutil
import torch
class _UpperCamelCase :
"""simple docstring"""
def __init__( self ) -> str:
'''simple docstring'''
__lowercase = psutil.Process()
__lowercase = False
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase = -1
while True:
__lowercase = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = True
__lowercase = threading.Thread(target=self.peak_monitor )
__lowercase = True
self.thread.start()
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase = False
self.thread.join()
return self.cpu_memory_peak
__a : List[str] = PeakCPUMemory()
def UpperCAmelCase ( ):
"""simple docstring"""
__lowercase = {'''time''': time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowercase = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowercase = torch.cuda.memory_allocated(lowercase )
torch.cuda.reset_peak_memory_stats()
return measures
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = {'''time''': time.time() - start_measures['''time''']}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowercase = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20
__lowercase = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowercase = (torch.cuda.memory_allocated(lowercase ) - start_measures[str(lowercase )]) / 2**20
__lowercase = (torch.cuda.max_memory_allocated(lowercase ) - start_measures[str(lowercase )]) / 2**20
return measures
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
print(F"{description}:" )
print(F"- Time: {measures['time']:.2f}s" )
for i in range(torch.cuda.device_count() ):
print(F"- GPU {i} allocated: {measures[str(lowercase )]:.2f}MiB" )
__lowercase = measures[F"{i}-peak"]
print(F"- GPU {i} peak: {peak:.2f}MiB" )
print(F"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
print(F"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" ) | 210 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
_lowerCAmelCase : Any = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
__SCREAMING_SNAKE_CASE : str
__SCREAMING_SNAKE_CASE : List[str]
__SCREAMING_SNAKE_CASE : Optional[List[str]]
@dataclass
class UpperCAmelCase_ :
__SCREAMING_SNAKE_CASE : List[int]
__SCREAMING_SNAKE_CASE : List[int]
__SCREAMING_SNAKE_CASE : Optional[List[int]] = None
__SCREAMING_SNAKE_CASE : Optional[List[int]] = None
class UpperCAmelCase_ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'train'
__SCREAMING_SNAKE_CASE : Tuple = 'dev'
__SCREAMING_SNAKE_CASE : Optional[int] = 'test'
class UpperCAmelCase_ :
@staticmethod
def snake_case_ ( A : Union[str, Any] , A : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def snake_case_ ( A : str ):
raise NotImplementedError
@staticmethod
def snake_case_ ( A : List[InputExample] , A : List[str] , A : int , A : PreTrainedTokenizer , A : Optional[int]=False , A : List[str]="[CLS]" , A : List[Any]=1 , A : str="[SEP]" , A : int=False , A : int=False , A : Any=0 , A : List[str]=0 , A : Dict=-1_0_0 , A : str=0 , A : Optional[Any]=True , ):
_UpperCAmelCase : Dict = {label: i for i, label in enumerate(A )}
_UpperCAmelCase : str = []
for ex_index, example in enumerate(A ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("Writing example %d of %d" , A , len(A ) )
_UpperCAmelCase : int = []
_UpperCAmelCase : List[str] = []
for word, label in zip(example.words , example.labels ):
_UpperCAmelCase : str = tokenizer.tokenize(A )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(A ) > 0:
tokens.extend(A )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(A ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_UpperCAmelCase : List[str] = tokenizer.num_special_tokens_to_add()
if len(A ) > max_seq_length - special_tokens_count:
_UpperCAmelCase : List[Any] = tokens[: (max_seq_length - special_tokens_count)]
_UpperCAmelCase : List[Any] = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_UpperCAmelCase : Dict = [sequence_a_segment_id] * len(A )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_UpperCAmelCase : str = [cls_token] + tokens
_UpperCAmelCase : Dict = [pad_token_label_id] + label_ids
_UpperCAmelCase : Any = [cls_token_segment_id] + segment_ids
_UpperCAmelCase : int = tokenizer.convert_tokens_to_ids(A )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_UpperCAmelCase : List[Any] = [1 if mask_padding_with_zero else 0] * len(A )
# Zero-pad up to the sequence length.
_UpperCAmelCase : List[str] = max_seq_length - len(A )
if pad_on_left:
_UpperCAmelCase : str = ([pad_token] * padding_length) + input_ids
_UpperCAmelCase : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_UpperCAmelCase : Any = ([pad_token_segment_id] * padding_length) + segment_ids
_UpperCAmelCase : Dict = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(A ) == max_seq_length
assert len(A ) == max_seq_length
assert len(A ) == max_seq_length
assert len(A ) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***" )
logger.info("guid: %s" , example.guid )
logger.info("tokens: %s" , " ".join([str(A ) for x in tokens] ) )
logger.info("input_ids: %s" , " ".join([str(A ) for x in input_ids] ) )
logger.info("input_mask: %s" , " ".join([str(A ) for x in input_mask] ) )
logger.info("segment_ids: %s" , " ".join([str(A ) for x in segment_ids] ) )
logger.info("label_ids: %s" , " ".join([str(A ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
_UpperCAmelCase : Dict = None
features.append(
InputFeatures(
input_ids=A , attention_mask=A , token_type_ids=A , label_ids=A ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class UpperCAmelCase_ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE : List[InputFeatures]
__SCREAMING_SNAKE_CASE : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : Dict , A : TokenClassificationTask , A : str , A : PreTrainedTokenizer , A : List[str] , A : str , A : Optional[int] = None , A : List[str]=False , A : Split = Split.train , ):
# Load data features from cache or dataset file
_UpperCAmelCase : int = os.path.join(
A , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(A ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_UpperCAmelCase : List[str] = cached_features_file + ".lock"
with FileLock(A ):
if os.path.exists(A ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
_UpperCAmelCase : Tuple = torch.load(A )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
_UpperCAmelCase : List[str] = token_classification_task.read_examples_from_file(A , A )
# TODO clean up all this to leverage built-in features of tokenizers
_UpperCAmelCase : List[Any] = token_classification_task.convert_examples_to_features(
A , A , A , A , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=A , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'Saving features into cached file {cached_features_file}' )
torch.save(self.features , A )
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : List[str] , A : Optional[Any] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ :
__SCREAMING_SNAKE_CASE : List[InputFeatures]
__SCREAMING_SNAKE_CASE : int = -1_0_0
def __init__( self : Tuple , A : TokenClassificationTask , A : str , A : PreTrainedTokenizer , A : List[str] , A : str , A : Optional[int] = None , A : Optional[Any]=False , A : Split = Split.train , ):
_UpperCAmelCase : Union[str, Any] = token_classification_task.read_examples_from_file(A , A )
# TODO clean up all this to leverage built-in features of tokenizers
_UpperCAmelCase : List[str] = token_classification_task.convert_examples_to_features(
A , A , A , A , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=A , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_UpperCAmelCase : List[str] = tf.data.Dataset.from_generator(
A , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , (
{"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
_UpperCAmelCase : List[Any] = tf.data.Dataset.from_generator(
A , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , (
{
"input_ids": tf.TensorShape([None] ),
"attention_mask": tf.TensorShape([None] ),
"token_type_ids": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def snake_case_ ( self : str ):
_UpperCAmelCase : Dict = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : List[Any] ):
return len(self.features )
def __getitem__( self : int , A : int ):
return self.features[i]
| 202 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class UpperCAmelCase_ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE : int = 'owlvit_text_model'
def __init__( self : int , A : int=4_9_4_0_8 , A : Optional[Any]=5_1_2 , A : Optional[Any]=2_0_4_8 , A : str=1_2 , A : int=8 , A : Tuple=1_6 , A : List[Any]="quick_gelu" , A : Tuple=1e-5 , A : Union[str, Any]=0.0 , A : List[Any]=0.02 , A : str=1.0 , A : str=0 , A : List[str]=4_9_4_0_6 , A : str=4_9_4_0_7 , **A : Optional[Any] , ):
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
_UpperCAmelCase : Union[str, Any] = vocab_size
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : List[Any] = intermediate_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : str = num_attention_heads
_UpperCAmelCase : List[str] = max_position_embeddings
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Tuple = layer_norm_eps
_UpperCAmelCase : List[str] = attention_dropout
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : List[Any] = initializer_factor
@classmethod
def snake_case_ ( cls : Any , A : Union[str, os.PathLike] , **A : Dict ):
cls._set_token_in_kwargs(A )
_UpperCAmelCase , _UpperCAmelCase : List[str] = cls.get_config_dict(A , **A )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
_UpperCAmelCase : int = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class UpperCAmelCase_ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE : Tuple = 'owlvit_vision_model'
def __init__( self : Union[str, Any] , A : Optional[int]=7_6_8 , A : int=3_0_7_2 , A : List[str]=1_2 , A : List[str]=1_2 , A : Optional[int]=3 , A : Optional[int]=7_6_8 , A : str=3_2 , A : Tuple="quick_gelu" , A : Dict=1e-5 , A : Optional[int]=0.0 , A : List[Any]=0.02 , A : str=1.0 , **A : Tuple , ):
super().__init__(**A )
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : Dict = num_attention_heads
_UpperCAmelCase : Optional[Any] = num_channels
_UpperCAmelCase : Union[str, Any] = image_size
_UpperCAmelCase : Dict = patch_size
_UpperCAmelCase : List[str] = hidden_act
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : Any = attention_dropout
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Tuple = initializer_factor
@classmethod
def snake_case_ ( cls : Optional[int] , A : Union[str, os.PathLike] , **A : int ):
cls._set_token_in_kwargs(A )
_UpperCAmelCase , _UpperCAmelCase : Dict = cls.get_config_dict(A , **A )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
_UpperCAmelCase : Tuple = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class UpperCAmelCase_ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE : List[str] = 'owlvit'
__SCREAMING_SNAKE_CASE : Optional[Any] = True
def __init__( self : Optional[Any] , A : Dict=None , A : Tuple=None , A : Optional[Any]=5_1_2 , A : Optional[Any]=2.6_592 , A : int=True , **A : Tuple , ):
super().__init__(**A )
if text_config is None:
_UpperCAmelCase : List[Any] = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
_UpperCAmelCase : Tuple = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
_UpperCAmelCase : str = OwlViTTextConfig(**A )
_UpperCAmelCase : int = OwlViTVisionConfig(**A )
_UpperCAmelCase : Optional[Any] = projection_dim
_UpperCAmelCase : str = logit_scale_init_value
_UpperCAmelCase : Optional[Any] = return_dict
_UpperCAmelCase : str = 1.0
@classmethod
def snake_case_ ( cls : Dict , A : Union[str, os.PathLike] , **A : Any ):
cls._set_token_in_kwargs(A )
_UpperCAmelCase , _UpperCAmelCase : str = cls.get_config_dict(A , **A )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
@classmethod
def snake_case_ ( cls : Optional[int] , A : Dict , A : Dict , **A : Optional[Any] ):
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : int = text_config
_UpperCAmelCase : Dict = vision_config
return cls.from_dict(A , **A )
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : str = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Optional[int] = self.text_config.to_dict()
_UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
_UpperCAmelCase : List[Any] = self.__class__.model_type
return output
class UpperCAmelCase_ ( _UpperCamelCase ):
@property
def snake_case_ ( self : List[str] ):
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def snake_case_ ( self : Optional[int] ):
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def snake_case_ ( self : str ):
return 1e-4
def snake_case_ ( self : str , A : "ProcessorMixin" , A : int = -1 , A : int = -1 , A : Optional["TensorType"] = None , ):
_UpperCAmelCase : Optional[Any] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=A , seq_length=A , framework=A )
_UpperCAmelCase : Union[str, Any] = super().generate_dummy_inputs(
processor.image_processor , batch_size=A , framework=A )
return {**text_input_dict, **image_input_dict}
@property
def snake_case_ ( self : List[Any] ):
return 1_4
| 202 | 1 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( a ):
def __init__( self : str , _snake_case : NestedDataStructureLike[PathLike] , _snake_case : Optional[NamedSplit] = None , _snake_case : Optional[Features] = None , _snake_case : str = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : Optional[int] = None , **_snake_case : Optional[int] , ):
"""simple docstring"""
super().__init__(
_snake_case , split=_snake_case , features=_snake_case , cache_dir=_snake_case , keep_in_memory=_snake_case , streaming=_snake_case , num_proc=_snake_case , **_snake_case , )
UpperCAmelCase_ = path_or_paths if isinstance(_snake_case , _snake_case) else {self.split: path_or_paths}
UpperCAmelCase_ = Text(
cache_dir=_snake_case , data_files=_snake_case , features=_snake_case , **_snake_case , )
def lowerCamelCase ( self : int):
"""simple docstring"""
if self.streaming:
UpperCAmelCase_ = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
self.builder.download_and_prepare(
download_config=_snake_case , download_mode=_snake_case , verification_mode=_snake_case , base_path=_snake_case , num_proc=self.num_proc , )
UpperCAmelCase_ = self.builder.as_dataset(
split=self.split , verification_mode=_snake_case , in_memory=self.keep_in_memory)
return dataset
| 51 |
from string import ascii_lowercase, ascii_uppercase
def a__ ( _UpperCamelCase : str ):
if not sentence:
return ""
__lowerCamelCase = dict(zip(_UpperCamelCase ,_UpperCamelCase ) )
return lower_to_upper.get(sentence[0] ,sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 330 | 0 |
"""simple docstring"""
import string
import numpy
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return b if a == 0 else greatest_common_divisor(b % a , lowerCAmelCase__ )
class lowercase__ :
'''simple docstring'''
UpperCamelCase = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
UpperCamelCase = numpy.vectorize(lambda SCREAMING_SNAKE_CASE : x % 36 )
UpperCamelCase = numpy.vectorize(SCREAMING_SNAKE_CASE )
def __init__( self : Optional[Any] , _UpperCAmelCase : numpy.ndarray ) -> None:
'''simple docstring'''
UpperCAmelCase_ = self.modulus(_UpperCAmelCase ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
UpperCAmelCase_ = encrypt_key.shape[0]
def lowercase__ ( self : str , _UpperCAmelCase : str ) -> int:
'''simple docstring'''
return self.key_string.index(_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
return self.key_string[round(_UpperCAmelCase )]
def lowercase__ ( self : Union[str, Any] ) -> None:
'''simple docstring'''
UpperCAmelCase_ = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
UpperCAmelCase_ = det % len(self.key_string )
UpperCAmelCase_ = len(self.key_string )
if greatest_common_divisor(_UpperCAmelCase , len(self.key_string ) ) != 1:
UpperCAmelCase_ = (
F"""determinant modular {req_l} of encryption key({det}) """
F"""is not co prime w.r.t {req_l}.\nTry another key."""
)
raise ValueError(_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ = [char for char in text.upper() if char in self.key_string]
UpperCAmelCase_ = chars[-1]
while len(_UpperCAmelCase ) % self.break_key != 0:
chars.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def lowercase__ ( self : List[str] , _UpperCAmelCase : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.process_text(text.upper() )
UpperCAmelCase_ = ""
for i in range(0 , len(_UpperCAmelCase ) - self.break_key + 1 , self.break_key ):
UpperCAmelCase_ = text[i : i + self.break_key]
UpperCAmelCase_ = [self.replace_letters(_UpperCAmelCase ) for char in batch]
UpperCAmelCase_ = numpy.array([vec] ).T
UpperCAmelCase_ = self.modulus(self.encrypt_key.dot(_UpperCAmelCase ) ).T.tolist()[
0
]
UpperCAmelCase_ = "".join(
self.replace_digits(_UpperCAmelCase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowercase__ ( self : Dict ) -> numpy.ndarray:
'''simple docstring'''
UpperCAmelCase_ = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
UpperCAmelCase_ = det % len(self.key_string )
UpperCAmelCase_ = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
UpperCAmelCase_ = i
break
UpperCAmelCase_ = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(_UpperCAmelCase ) )
def lowercase__ ( self : List[str] , _UpperCAmelCase : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.make_decrypt_key()
UpperCAmelCase_ = self.process_text(text.upper() )
UpperCAmelCase_ = ""
for i in range(0 , len(_UpperCAmelCase ) - self.break_key + 1 , self.break_key ):
UpperCAmelCase_ = text[i : i + self.break_key]
UpperCAmelCase_ = [self.replace_letters(_UpperCAmelCase ) for char in batch]
UpperCAmelCase_ = numpy.array([vec] ).T
UpperCAmelCase_ = self.modulus(decrypt_key.dot(_UpperCAmelCase ) ).T.tolist()[0]
UpperCAmelCase_ = "".join(
self.replace_digits(_UpperCAmelCase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def a__ ( ):
UpperCAmelCase_ = int(input("Enter the order of the encryption key: " ) )
UpperCAmelCase_ = []
print("Enter each row of the encryption key with space separated integers" )
for _ in range(lowerCAmelCase__ ):
UpperCAmelCase_ = [int(lowerCAmelCase__ ) for x in input().split()]
hill_matrix.append(lowerCAmelCase__ )
UpperCAmelCase_ = HillCipher(numpy.array(lowerCAmelCase__ ) )
print("Would you like to encrypt or decrypt some text? (1 or 2)" )
UpperCAmelCase_ = input("\n1. Encrypt\n2. Decrypt\n" )
if option == "1":
UpperCAmelCase_ = input("What text would you like to encrypt?: " )
print("Your encrypted text is:" )
print(hc.encrypt(lowerCAmelCase__ ) )
elif option == "2":
UpperCAmelCase_ = input("What text would you like to decrypt?: " )
print("Your decrypted text is:" )
print(hc.decrypt(lowerCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 241 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowerCamelCase = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowerCamelCase = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
lowerCamelCase = """zero2"""
lowerCamelCase = """zero3"""
lowerCamelCase = [ZEROa, ZEROa]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
UpperCAmelCase_ = parameterized.to_safe_name("_".join(str(lowerCAmelCase__ ) for x in param.args ) )
return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
lowerCamelCase = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase )
def lowercase__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
self.run_and_check(
stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
@require_torch_multi_gpu
@parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase )
def lowercase__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ) -> Any:
'''simple docstring'''
self.run_and_check(
stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
@parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.run_and_check(
stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
@require_torch_multi_gpu
@parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase )
def lowercase__ ( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
self.run_and_check(
stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int = 10 , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = models[model]
UpperCAmelCase_ = self.run_trainer(
stage=_UpperCAmelCase , model_name=_UpperCAmelCase , eval_steps=_UpperCAmelCase , num_train_epochs=1 , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
self.do_checks(_UpperCAmelCase )
return output_dir
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.get_auto_remove_tmp_dir("./xxx" , after=_UpperCAmelCase )
UpperCAmelCase_ = F"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(_UpperCAmelCase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
UpperCAmelCase_ = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
UpperCAmelCase_ = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
UpperCAmelCase_ = self.get_launcher(_UpperCAmelCase )
UpperCAmelCase_ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_UpperCAmelCase , env=self.get_env() )
return output_dir
def lowercase__ ( self : List[Any] , _UpperCAmelCase : int=False ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = min(2 , get_gpu_count() ) if distributed else 1
return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 241 | 1 |
'''simple docstring'''
import math
import unittest
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
assert isinstance(__UpperCamelCase ,__UpperCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaises(A ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ), 'Zero doesn\'t have any positive factors, primes must have exactly two.', )
self.assertFalse(
is_prime(1 ), 'One only has 1 positive factor, primes must have exactly two.', )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 251 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="bert", choices=["bert"])
parser.add_argument("--model_name", default="bert-base-uncased", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
UpperCamelCase_ = parser.parse_args()
if args.model_type == "bert":
UpperCamelCase_ = BertForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase_ = "bert"
else:
raise ValueError("args.model_type should be \"bert\".")
UpperCamelCase_ = model.state_dict()
UpperCamelCase_ = {}
for w in ["word_embeddings", "position_embeddings"]:
UpperCamelCase_ = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
UpperCamelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
UpperCamelCase_ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
UpperCamelCase_ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
UpperCamelCase_ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
UpperCamelCase_ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
UpperCamelCase_ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
UpperCamelCase_ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
UpperCamelCase_ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
UpperCamelCase_ = state_dict["cls.predictions.decoder.weight"]
UpperCamelCase_ = state_dict["cls.predictions.bias"]
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[F"""cls.predictions.transform.dense.{w}"""]
UpperCamelCase_ = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 251 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__a =LEDTokenizer
__a =LEDTokenizerFast
__a =True
def UpperCamelCase__ ( self : Optional[Any] ):
super().setUp()
_a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_a = dict(zip(__a , range(len(__a ) ) ) )
_a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_a = {"unk_token": "<unk>"}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
def UpperCamelCase__ ( self : Dict , **__a : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def UpperCamelCase__ ( self : Tuple , **__a : int ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def UpperCamelCase__ ( self : Optional[int] , __a : Optional[Any] ):
return "lower newer", "lower newer"
@cached_property
def UpperCamelCase__ ( self : List[Any] ):
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def UpperCamelCase__ ( self : int ):
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def UpperCamelCase__ ( self : Tuple ):
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(__a , max_length=len(__a ) , padding=__a , return_tensors="pt" )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_a = batch.input_ids.tolist()[0]
self.assertListEqual(__a , __a )
@require_torch
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(__a , padding=__a , return_tensors="pt" )
self.assertIn("input_ids" , __a )
self.assertIn("attention_mask" , __a )
self.assertNotIn("labels" , __a )
self.assertNotIn("decoder_attention_mask" , __a )
@require_torch
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(text_target=__a , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def UpperCamelCase__ ( self : Tuple ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(
["I am a small frog" * 10_24, "I am a small frog"] , padding=__a , truncation=__a , return_tensors="pt" )
self.assertIsInstance(__a , __a )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def UpperCamelCase__ ( self : Any ):
_a = ["A long paragraph for summarization."]
_a = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(__a , return_tensors="pt" )
_a = tokenizer(text_target=__a , return_tensors="pt" )
_a = inputs["input_ids"]
_a = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def UpperCamelCase__ ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = ["Summary of the text.", "Another summary."]
_a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_a = tokenizer(__a , padding=__a )
_a = [[0] * len(__a ) for x in encoded_output["input_ids"]]
_a = tokenizer.pad(__a )
self.assertSequenceEqual(outputs["global_attention_mask"] , __a )
def UpperCamelCase__ ( self : List[str] ):
pass
def UpperCamelCase__ ( self : Optional[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_a = self.tokenizer_class.from_pretrained(__a , **__a )
_a = "A, <mask> AllenNLP sentence."
_a = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
_a = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__a , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__a , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 354 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowerCAmelCase_ : Tuple = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def _lowerCamelCase ( lowercase : List[Any] ) -> Optional[int]:
_a = test_results.split(" " )
_a = 0
_a = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_a = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowercase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _lowerCamelCase ( lowercase : str ) -> Optional[Any]:
_a = {}
_a = None
_a = False
for line in failures_short_lines.split("\n" ):
if re.search(r"_ \[doctest\]" , lowercase ):
_a = True
_a = line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
_a = line
_a = False
return failures
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Tuple , __a : str , __a : Dict ):
_a = title
_a = doc_test_results["time_spent"].split("," )[0]
_a = doc_test_results["success"]
_a = doc_test_results["failures"]
_a = self.n_success + self.n_failures
# Failures and success of the modeling tests
_a = doc_test_results
@property
def UpperCamelCase__ ( self : int ):
_a = [self._time_spent]
_a = 0
for time in time_spent:
_a = time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__a ) == 1:
_a = [0, 0, time_parts[0]]
_a , _a , _a = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_a , _a , _a = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'{int(__a )}h{int(__a )}m{int(__a )}s'
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def UpperCamelCase__ ( self : List[str] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def UpperCamelCase__ ( self : str ):
_a = 40
_a = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(__a , __a )}
_a = ""
for category, failures in category_failures.items():
if len(__a ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def UpperCamelCase__ ( self : List[str] ):
_a = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__a )
@staticmethod
def UpperCamelCase__ ( ):
_a = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(__a )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=__a , )
def UpperCamelCase__ ( self : Tuple ):
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
_a = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
_a = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=__a , )
def UpperCamelCase__ ( self : Dict , __a : List[str] , __a : List[Any] , __a : Tuple , __a : int ):
_a = ""
for key, value in failures.items():
_a = value[:2_00] + " [Truncated]" if len(__a ) > 2_50 else value
failures_text += f'*{key}*\n_{value}_\n\n'
_a = job_name
_a = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
_a = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def UpperCamelCase__ ( self : str ):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
_a = self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
_a = sorted(self.doc_test_results.items() , key=lambda __a : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
_a = f'*Num failures* :{len(job_result["failed"] )} \n'
_a = job_result["failures"]
_a = self.get_reply_blocks(__a , __a , __a , text=__a )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f'Results for {job}' , blocks=__a , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def _lowerCamelCase ( ) -> Any:
_a = os.environ["GITHUB_RUN_ID"]
_a = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
_a = requests.get(lowercase ).json()
_a = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
_a = math.ceil((result["total_count"] - 100) / 100 )
for i in range(lowercase ):
_a = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , lowercase )
return {}
def _lowerCamelCase ( lowercase : str ) -> Dict:
_a = {}
if os.path.exists(lowercase ):
_a = os.listdir(lowercase )
for file in files:
try:
with open(os.path.join(lowercase , lowercase ) , encoding="utf-8" ) as f:
_a = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(lowercase , lowercase )}.' ) from e
return _artifact
def _lowerCamelCase ( ) -> str:
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Dict , __a : str ):
_a = name
_a = []
def __str__( self : List[str] ):
return self.name
def UpperCamelCase__ ( self : str , __a : str ):
self.paths.append({"name": self.name, "path": path} )
_a = {}
_a = filter(os.path.isdir , os.listdir() )
for directory in directories:
_a = directory
if artifact_name not in _available_artifacts:
_a = Artifact(lowercase )
_available_artifacts[artifact_name].add_path(lowercase )
return _available_artifacts
if __name__ == "__main__":
lowerCAmelCase_ : List[Any] = get_job_links()
lowerCAmelCase_ : Any = retrieve_available_artifacts()
lowerCAmelCase_ : List[str] = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowerCAmelCase_ : Optional[Any] = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowerCAmelCase_ : int = github_actions_job_links.get('run_doctests')
lowerCAmelCase_ : Union[str, Any] = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
lowerCAmelCase_ : List[str] = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = handle_test_results(artifact['stats'])
lowerCAmelCase_ : List[str] = failed
lowerCAmelCase_ : Optional[Any] = success
lowerCAmelCase_ : Tuple = time_spent[1:-1] + ', '
lowerCAmelCase_ : List[Any] = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
lowerCAmelCase_ : int = line.replace('FAILED ', '')
lowerCAmelCase_ : Optional[int] = line.split()[0].replace('\n', '')
if "::" in line:
lowerCAmelCase_ , lowerCAmelCase_ : str = line.split('::')
else:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowerCAmelCase_ : Union[str, Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowerCAmelCase_ : List[str] = all_failures[test] if test in all_failures else 'N/A'
lowerCAmelCase_ : Optional[Any] = failure
break
lowerCAmelCase_ : Tuple = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 346 | 0 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 187 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class lowerCAmelCase__ :
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> Dict:
if dst_width < 0 or dst_height < 0:
raise ValueError('''Destination width/height should be > 0''' )
__lowerCamelCase = img
__lowerCamelCase = img.shape[1]
__lowerCamelCase = img.shape[0]
__lowerCamelCase = dst_width
__lowerCamelCase = dst_height
__lowerCamelCase = self.src_w / self.dst_w
__lowerCamelCase = self.src_h / self.dst_h
__lowerCamelCase = __lowerCamelCase = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 2_55
)
def __A ( self : List[Any] ) -> str:
for i in range(self.dst_h ):
for j in range(self.dst_w ):
__lowerCamelCase = self.img[self.get_y(SCREAMING_SNAKE_CASE__ )][self.get_x(SCREAMING_SNAKE_CASE__ )]
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> int:
return int(self.ratio_x * x )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> int:
return int(self.ratio_y * y )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = 800, 600
SCREAMING_SNAKE_CASE__ : int = imread("image_data/lena.jpg", 1)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
| 270 | 0 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
snake_case__ : Optional[Any] = ["""pixel_values"""]
def __init__( self , __lowerCAmelCase = True , __lowerCAmelCase = 1 / 255 , __lowerCAmelCase = True , __lowerCAmelCase = 8 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**_A )
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_pad
lowercase = pad_size
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase ):
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
lowercase = get_image_size(_A )
lowercase = (old_height // size + 1) * size - old_height
lowercase = (old_width // size + 1) * size - old_width
return pad(_A , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=_A )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = ChannelDimension.FIRST , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_pad if do_pad is not None else self.do_pad
lowercase = pad_size if pad_size is not None else self.pad_size
lowercase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
lowercase = [to_numpy_array(_A ) for image in images]
if do_rescale:
lowercase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_pad:
lowercase = [self.pad(_A , size=_A ) for image in images]
lowercase = [to_channel_dimension_format(_A , _A ) for image in images]
lowercase = {'pixel_values': images}
return BatchFeature(data=_A , tensor_type=_A )
| 356 | """simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> bool:
'''simple docstring'''
lowercase = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 32 | 0 |
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
assert (
isinstance(lowerCamelCase__ , lowerCamelCase__ ) and number_of_steps > 0
), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
__A = 1, 1
for _ in range(number_of_steps - 1 ):
__A = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 266 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(
lowercase , split=lowercase , features=lowercase , cache_dir=lowercase , keep_in_memory=lowercase , streaming=lowercase , num_proc=lowercase , **lowercase , )
A_ : Any = field
A_ : Optional[int] = path_or_paths if isinstance(lowercase , lowercase ) else {self.split: path_or_paths}
A_ : str = Json(
cache_dir=lowercase , data_files=lowercase , features=lowercase , field=lowercase , **lowercase , )
def _a (self ):
# Build iterable dataset
if self.streaming:
A_ : Optional[int] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ : Optional[Any] = None
A_ : Optional[Any] = None
A_ : Tuple = None
A_ : Optional[int] = None
self.builder.download_and_prepare(
download_config=lowercase , download_mode=lowercase , verification_mode=lowercase , base_path=lowercase , num_proc=self.num_proc , )
A_ : Union[str, Any] = self.builder.as_dataset(
split=self.split , verification_mode=lowercase , in_memory=self.keep_in_memory )
return dataset
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase = None , lowercase = None , **lowercase , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
A_ : Union[str, Any] = dataset
A_ : Optional[int] = path_or_buf
A_ : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
A_ : List[str] = num_proc
A_ : Union[str, Any] = """utf-8"""
A_ : Dict = to_json_kwargs
def _a (self ):
A_ : Optional[Any] = self.to_json_kwargs.pop("""path_or_buf""" , lowercase )
A_ : Tuple = self.to_json_kwargs.pop("""orient""" , """records""" )
A_ : Union[str, Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
A_ : str = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
A_ : Dict = self.to_json_kwargs.pop("""compression""" , lowercase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=lowercase ) as buffer:
A_ : List[str] = self._write(file_obj=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
""" was passed. Please provide a local path instead.""" )
A_ : List[Any] = self._write(
file_obj=self.path_or_buf , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
return written
def _a (self , lowercase ):
A_, A_, A_, A_, A_ : Dict = args
A_ : Any = query_table(
table=self.dataset.data , key=slice(lowercase , offset + self.batch_size ) , indices=self.dataset._indices , )
A_ : Union[str, Any] = batch.to_pandas().to_json(
path_or_buf=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **lowercase )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def _a (self , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
A_ : Optional[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
A_ : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowercase )
else:
A_, A_ : List[Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase , lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(lowercase )
return written | 206 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class lowerCamelCase :
def __init__( self, lowercase_, lowercase_=13, lowercase_=7, lowercase_=False, lowercase_=True, lowercase_=False, lowercase_=True, lowercase_=33, lowercase_=32, lowercase_=5, lowercase_=4, lowercase_=37, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=16, lowercase_=2, lowercase_=0.02, lowercase_=3, lowercase_=4, lowercase_=None, ) -> Dict:
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = scope
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size], self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
snake_case = ids_tensor([self.batch_size], self.num_choices )
snake_case = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self ) -> Optional[int]:
return EsmConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> List[str]:
snake_case = EsmModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case = model(lowercase_, attention_mask=lowercase_ )
snake_case = model(lowercase_ )
snake_case = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> str:
snake_case = EsmForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case = model(lowercase_, attention_mask=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> Union[str, Any]:
snake_case = self.num_labels
snake_case = EsmForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case = model(lowercase_, attention_mask=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = self.prepare_config_and_inputs()
(
(
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) ,
) = config_and_inputs
snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
snake_case_ = False
snake_case_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = ()
snake_case_ = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
def _lowerCamelCase ( self ) -> str:
snake_case = EsmModelTester(self )
snake_case = ConfigTester(self, config_class=lowercase_, hidden_size=37 )
def _lowerCamelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ) -> List[str]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case = type
self.model_tester.create_and_check_model(*lowercase_ )
def _lowerCamelCase ( self ) -> int:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def _lowerCamelCase ( self ) -> Dict:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = EsmModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()[0]
snake_case = EsmEmbeddings(config=lowercase_ )
snake_case = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
snake_case = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
snake_case = create_position_ids_from_input_ids(lowercase_, model.padding_idx )
self.assertEqual(position_ids.shape, expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowercase_, lowercase_ ) ) )
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()[0]
snake_case = EsmEmbeddings(config=lowercase_ )
snake_case = torch.empty(2, 4, 30 )
snake_case = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
snake_case = torch.as_tensor([expected_single_positions, expected_single_positions] )
snake_case = embeddings.create_position_ids_from_inputs_embeds(lowercase_ )
self.assertEqual(position_ids.shape, expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowercase_, lowercase_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def _lowerCamelCase ( self ) -> Tuple:
pass
@unittest.skip('Esm does not support embedding resizing' )
def _lowerCamelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCamelCase ( self ) -> Any:
pass
@require_torch
class lowerCamelCase ( __lowerCAmelCase ):
@slow
def _lowerCamelCase ( self ) -> str:
with torch.no_grad():
snake_case = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
snake_case = torch.tensor([[0, 1, 2, 3, 4, 5]] )
snake_case = model(lowercase_ )[0]
snake_case = 33
snake_case = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape, lowercase_ )
snake_case = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowercase_, atol=1E-4 ) )
@slow
def _lowerCamelCase ( self ) -> Optional[int]:
with torch.no_grad():
snake_case = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
snake_case = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
snake_case = model(lowercase_ )[0]
# compare the actual values for a slice.
snake_case = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowercase_, atol=1E-4 ) )
| 360 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> None:
create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] )
def __magic_name__ ( A , A , A , A , ) -> None:
if index == len(A ):
print(A )
return
for i in range(len(A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
snake_case = True
create_state_space_tree(A , A , index + 1 , A )
current_sequence.pop()
snake_case = False
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCAmelCase_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 332 | 0 |
'''simple docstring'''
from math import factorial
A_ : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)}
def snake_case_ ( lowerCAmelCase_ )-> int:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise TypeError("""Parameter number must be int""" )
if number < 0:
raise ValueError("""Parameter number must be greater than or equal to 0""" )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(lowerCAmelCase_ ) )
def snake_case_ ( lowerCAmelCase_ = 60 , lowerCAmelCase_ = 1000000 )-> int:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise TypeError("""Parameters chain_length and number_limit must be int""" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"""Parameters chain_length and number_limit must be greater than 0""" )
# the counter for the chains with the exact desired length
_UpperCAmelCase : List[str] = 0
# the cached sizes of the previous chains
_UpperCAmelCase : dict[int, int] = {}
for start_chain_element in range(1 , lowerCAmelCase_ ):
# The temporary set will contain the elements of the chain
_UpperCAmelCase : List[Any] = set()
_UpperCAmelCase : List[Any] = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
_UpperCAmelCase : str = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(lowerCAmelCase_ )
chain_set_length += 1
_UpperCAmelCase : Optional[Any] = digit_factorial_sum(lowerCAmelCase_ )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
_UpperCAmelCase : int = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution()}""")
| 215 |
'''simple docstring'''
from __future__ import annotations
import queue
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ) -> str:
_UpperCAmelCase : Optional[Any] = data
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Union[str, Any] = None
def snake_case_ ( )-> TreeNode:
'''simple docstring'''
print("""\n********Press N to stop entering at any point of time********\n""" )
_UpperCAmelCase : Any = input("""Enter the value of the root node: """ ).strip().lower()
_UpperCAmelCase : queue.Queue = queue.Queue()
_UpperCAmelCase : List[str] = TreeNode(int(lowerCAmelCase_ ) )
q.put(lowerCAmelCase_ )
while not q.empty():
_UpperCAmelCase : str = q.get()
_UpperCAmelCase : Any = F'''Enter the left node of {node_found.data}: '''
_UpperCAmelCase : Union[str, Any] = input(lowerCAmelCase_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_UpperCAmelCase : List[str] = TreeNode(int(lowerCAmelCase_ ) )
_UpperCAmelCase : Optional[int] = left_node
q.put(lowerCAmelCase_ )
_UpperCAmelCase : Dict = F'''Enter the right node of {node_found.data}: '''
_UpperCAmelCase : Tuple = input(lowerCAmelCase_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_UpperCAmelCase : Any = TreeNode(int(lowerCAmelCase_ ) )
_UpperCAmelCase : Optional[Any] = right_node
q.put(lowerCAmelCase_ )
raise
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase : queue.Queue = queue.Queue()
q.put(lowerCAmelCase_ )
while not q.empty():
_UpperCAmelCase : Dict = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase : queue.Queue = queue.Queue()
q.put(lowerCAmelCase_ )
while not q.empty():
_UpperCAmelCase : Optional[int] = []
while not q.empty():
_UpperCAmelCase : Optional[int] = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase : list[TreeNode] = []
_UpperCAmelCase : Optional[Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = n.left
# end of while means current node doesn't have left child
_UpperCAmelCase : int = stack.pop()
# start to traverse its right child
_UpperCAmelCase : Any = n.right
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase : list[TreeNode] = []
_UpperCAmelCase : Optional[Any] = node
while n or stack:
while n:
stack.append(lowerCAmelCase_ )
_UpperCAmelCase : Tuple = n.left
_UpperCAmelCase : Union[str, Any] = stack.pop()
print(n.data , end=""",""" )
_UpperCAmelCase : Any = n.right
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase ,_UpperCAmelCase : str = [], []
_UpperCAmelCase : Dict = node
stacka.append(lowerCAmelCase_ )
while stacka: # to find the reversed order of post order, store it in stack2
_UpperCAmelCase : Optional[int] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(lowerCAmelCase_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def snake_case_ ( lowerCAmelCase_ = "" , lowerCAmelCase_=50 , lowerCAmelCase_="*" )-> str:
'''simple docstring'''
if not s:
return "\n" + width * char
_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = divmod(width - len(lowerCAmelCase_ ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
A_ : TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 5_0 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 215 | 1 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
UpperCamelCase__ = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
UpperCamelCase__ = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
UpperCamelCase__ = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"
def _UpperCamelCase (a__ :Optional[Any] , a__ :Any ):
"""simple docstring"""
return float((preds == labels).mean() )
def _UpperCamelCase (a__ :Dict , a__ :Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = simple_accuracy(a__ , a__ )
UpperCamelCase__ = float(fa_score(y_true=a__ , y_pred=a__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _UpperCamelCase (a__ :List[Any] , a__ :Any ):
"""simple docstring"""
UpperCamelCase__ = np.array(a__ )
UpperCamelCase__ = np.array(a__ )
UpperCamelCase__ = en_sentvecs.shape[0]
# mean centering
UpperCamelCase__ = en_sentvecs - np.mean(a__ , axis=0 )
UpperCamelCase__ = in_sentvecs - np.mean(a__ , axis=0 )
UpperCamelCase__ = cdist(a__ , a__ , """cosine""" )
UpperCamelCase__ = np.array(range(a__ ) )
UpperCamelCase__ = sim.argsort(axis=1 )[:, :10]
UpperCamelCase__ = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def _lowerCamelCase ( self ):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(__lowerCAmelCase , __lowerCAmelCase )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(__lowerCAmelCase , __lowerCAmelCase )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
| 87 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : Any = """xlnet"""
snake_case : Optional[Any] = ["""mems"""]
snake_case : Any = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __lowerCAmelCase=32000 , __lowerCAmelCase=1024 , __lowerCAmelCase=24 , __lowerCAmelCase=16 , __lowerCAmelCase=4096 , __lowerCAmelCase="gelu" , __lowerCAmelCase=True , __lowerCAmelCase="bi" , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=-1 , __lowerCAmelCase=False , __lowerCAmelCase="last" , __lowerCAmelCase=True , __lowerCAmelCase="tanh" , __lowerCAmelCase=0.1 , __lowerCAmelCase=5 , __lowerCAmelCase=5 , __lowerCAmelCase=5 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , **__lowerCAmelCase , ):
UpperCamelCase__ = vocab_size
UpperCamelCase__ = d_model
UpperCamelCase__ = n_layer
UpperCamelCase__ = n_head
if d_model % n_head != 0:
raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})""" )
UpperCamelCase__ = d_model // n_head
UpperCamelCase__ = ff_activation
UpperCamelCase__ = d_inner
UpperCamelCase__ = untie_r
UpperCamelCase__ = attn_type
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = dropout
UpperCamelCase__ = mem_len
UpperCamelCase__ = reuse_len
UpperCamelCase__ = bi_data
UpperCamelCase__ = clamp_len
UpperCamelCase__ = same_length
UpperCamelCase__ = summary_type
UpperCamelCase__ = summary_use_proj
UpperCamelCase__ = summary_activation
UpperCamelCase__ = summary_last_dropout
UpperCamelCase__ = start_n_top
UpperCamelCase__ = end_n_top
UpperCamelCase__ = bos_token_id
UpperCamelCase__ = pad_token_id
UpperCamelCase__ = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"""The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
""" instead.""" , __lowerCAmelCase , )
UpperCamelCase__ = kwargs["""use_cache"""]
UpperCamelCase__ = use_mems_eval
UpperCamelCase__ = use_mems_train
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
@property
def _lowerCamelCase ( self ):
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _lowerCamelCase ( self , __lowerCAmelCase ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 87 | 1 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> np.ndarray:
lowercase__ : Optional[int] = int(np.ceil((x_end - xa) / step_size ) )
lowercase__ : Optional[int] = np.zeros((n + 1,) )
lowercase__ : List[Any] = ya
lowercase__ : Optional[Any] = xa
for k in range(__lowerCamelCase ):
lowercase__ : str = y[k] + step_size * ode_func(__lowerCamelCase , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
"""simple docstring"""
import string
def lowerCamelCase__ ( _lowerCamelCase : str ) -> None:
for key in range(len(string.ascii_uppercase ) ):
lowerCamelCase_ = ''
for symbol in message:
if symbol in string.ascii_uppercase:
lowerCamelCase_ = string.ascii_uppercase.find(_lowerCamelCase )
lowerCamelCase_ = num - key
if num < 0:
lowerCamelCase_ = num + len(string.ascii_uppercase )
lowerCamelCase_ = translated + string.ascii_uppercase[num]
else:
lowerCamelCase_ = translated + symbol
print(F'''Decryption using Key #{key}: {translated}''' )
def lowerCamelCase__ ( ) -> None:
lowerCamelCase_ = input('Encrypted message: ' )
lowerCamelCase_ = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 183 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
lowerCAmelCase__ =logging.get_logger(__name__)
lowerCAmelCase__ ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ ={
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ ={
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
lowerCAmelCase__ ={
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = SqueezeBertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__="[UNK]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="[PAD]" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ):
"""simple docstring"""
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCAmelCase__ ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(lowerCAmelCase__ , normalizer_state.pop('type' ) )
SCREAMING_SNAKE_CASE_ : List[str] = do_lower_case
SCREAMING_SNAKE_CASE_ : Any = strip_accents
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE_ : Dict = normalizer_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = do_lower_case
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 366 |
def a__ ( A__ ):
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
SCREAMING_SNAKE_CASE_ : Tuple = len(A__ ) if (len(A__ ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ), 'Stack'.center(A__ ), 'Postfix'.center(A__ ), sep=' | ', )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(A__ ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(A__ ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(A__ ) == 0:
stack.append(A__ ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(A__ ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(A__ ) # push x to stack
print(
x.center(8 ), (''.join(A__ )).ljust(A__ ), (''.join(A__ )).ljust(A__ ), sep=' | ', ) # Output in tabular format
while len(A__ ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ), (''.join(A__ )).ljust(A__ ), (''.join(A__ )).ljust(A__ ), sep=' | ', ) # Output in tabular format
return "".join(A__ ) # return Postfix as str
def a__ ( A__ ):
SCREAMING_SNAKE_CASE_ : Tuple = list(infix[::-1] ) # reverse the infix equation
for i in range(len(A__ ) ):
if infix[i] == "(":
SCREAMING_SNAKE_CASE_ : Optional[int] = ')' # change "(" to ")"
elif infix[i] == ")":
SCREAMING_SNAKE_CASE_ : Optional[int] = '(' # change ")" to "("
return (infix_2_postfix(''.join(A__ ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
lowerCAmelCase__ : str =input('\nEnter an Infix Equation = ') # Input an Infix equation
lowerCAmelCase__ : Optional[Any] =''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| 162 | 0 |
'''simple docstring'''
def snake_case__ ( ) -> int:
'''simple docstring'''
return 1
def snake_case__ ( _A: int ) -> int:
'''simple docstring'''
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def snake_case__ ( _A: int ) -> int:
'''simple docstring'''
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(_A )
def snake_case__ ( _A: int ) -> int:
'''simple docstring'''
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(_A )
def snake_case__ ( _A: int ) -> int:
'''simple docstring'''
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(_A )
def snake_case__ ( _A: int ) -> int:
'''simple docstring'''
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(_A )
def snake_case__ ( _A: int ) -> int:
'''simple docstring'''
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(_A )
def snake_case__ ( _A: int ) -> int:
'''simple docstring'''
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(_A )
def snake_case__ ( _A: int = 200 ) -> int:
'''simple docstring'''
return two_pound(_A )
if __name__ == "__main__":
print(solution(int(input().strip())))
| 272 | '''simple docstring'''
import os
import string
import sys
__lowercase = 1 << 8
__lowercase = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 2_7,
'''up''': 6_5 + ARROW_KEY_FLAG,
'''down''': 6_6 + ARROW_KEY_FLAG,
'''right''': 6_7 + ARROW_KEY_FLAG,
'''left''': 6_8 + ARROW_KEY_FLAG,
'''mod_int''': 9_1,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 5_0,
'''delete''': 5_1,
'''pg_up''': 5_3,
'''pg_down''': 5_4,
}
__lowercase = KEYMAP['''up''']
__lowercase = KEYMAP['''left''']
if sys.platform == "win32":
__lowercase = []
__lowercase = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(1_0):
__lowercase = ord(str(i))
def snake_case__ ( ) -> List[Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
lowerCAmelCase = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(_A ) == 0:
# Read the keystroke
lowerCAmelCase = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(_A )
if ord(_A ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCAmelCase = chr(KEYMAP["""esc"""] )
except KeyError:
lowerCAmelCase = cha[1]
else:
lowerCAmelCase = ch.decode(_A )
else:
lowerCAmelCase = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase = sys.stdin.fileno()
lowerCAmelCase = termios.tcgetattr(_A )
try:
tty.setraw(_A )
lowerCAmelCase = sys.stdin.read(1 )
finally:
termios.tcsetattr(_A , termios.TCSADRAIN , _A )
return ch
def snake_case__ ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase = get_raw_chars()
if ord(_A ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_A ) == KEYMAP["esc"]:
lowerCAmelCase = get_raw_chars()
if ord(_A ) == KEYMAP["mod_int"]:
lowerCAmelCase = get_raw_chars()
if ord(_A ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_A ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_A ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 272 | 1 |
'''simple docstring'''
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod() | 283 |
'''simple docstring'''
import numpy
class _snake_case :
def __init__( self , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
UpperCAmelCase__ : Optional[Any] = numpy.random.rand(
self.input_array.shape[1] , 4)
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
UpperCAmelCase__ : Optional[int] = numpy.random.rand(
4 , 3)
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
UpperCAmelCase__ : Any = numpy.random.rand(3 , 1)
# Real output values provided.
UpperCAmelCase__ : Tuple = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
UpperCAmelCase__ : Union[str, Any] = numpy.zeros(output_array.shape)
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights))
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
UpperCAmelCase__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ))
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
UpperCAmelCase__ : Tuple = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ))
return self.layer_between_second_hidden_layer_and_output
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , )
UpperCAmelCase__ : str = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer) , )
UpperCAmelCase__ : Any = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
for iteration in range(1 , iterations + 1):
UpperCAmelCase__ : Optional[Any] = self.feedforward()
self.back_propagation()
if give_loss:
UpperCAmelCase__ : str = numpy.mean(numpy.square(output - self.feedforward()))
print(f'''Iteration {iteration} Loss: {loss}''')
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ : List[Any] = input_arr
UpperCAmelCase__ : Tuple = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights))
UpperCAmelCase__ : List[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ))
UpperCAmelCase__ : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ))
return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def _UpperCamelCase ( UpperCamelCase__ ):
return 1 / (1 + numpy.exp(-value ))
def _UpperCamelCase ( UpperCamelCase__ ):
return (value) * (1 - (value))
def _UpperCamelCase ( ):
UpperCAmelCase__ : Union[str, Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
UpperCAmelCase__ : str = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
UpperCAmelCase__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=UpperCamelCase__ , output_array=UpperCamelCase__ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=UpperCamelCase__ , iterations=1_0 , give_loss=UpperCamelCase__ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example() | 283 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str ) -> List[Any]:
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
def __magic_name__ ( __lowerCAmelCase : Any ) -> Dict:
__lowerCamelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__lowerCamelCase = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
__lowerCamelCase = value
else:
__lowerCamelCase = value
return new_state_dict
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=False ) -> Optional[Any]:
__lowerCamelCase = ''''''
if is_panoptic:
__lowerCamelCase = '''conditional_detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
__lowerCamelCase = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[:256, :]
__lowerCamelCase = in_proj_bias[:256]
__lowerCamelCase = in_proj_weight[256:512, :]
__lowerCamelCase = in_proj_bias[256:512]
__lowerCamelCase = in_proj_weight[-256:, :]
__lowerCamelCase = in_proj_bias[-256:]
def __magic_name__ ( ) -> Union[str, Any]:
__lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowerCamelCase = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> Optional[int]:
__lowerCamelCase = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__lowerCamelCase = '''resnet101'''
if "dc5" in model_name:
__lowerCamelCase = True
__lowerCamelCase = '''panoptic''' in model_name
if is_panoptic:
__lowerCamelCase = 250
else:
__lowerCamelCase = 91
__lowerCamelCase = '''huggingface/label-files'''
__lowerCamelCase = '''coco-detection-id2label.json'''
__lowerCamelCase = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
__lowerCamelCase = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
# load image processor
__lowerCamelCase = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
__lowerCamelCase = ConditionalDetrImageProcessor(format=__lowerCAmelCase )
# prepare image
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' )
__lowerCamelCase = encoding['''pixel_values''']
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
__lowerCamelCase = torch.hub.load('''DeppMeng/ConditionalDETR''' , __lowerCAmelCase , pretrained=__lowerCAmelCase ).eval()
__lowerCamelCase = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__lowerCamelCase = '''conditional_detr.''' + src
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = rename_backbone_keys(__lowerCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowerCAmelCase , is_panoptic=__lowerCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__lowerCamelCase = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = val
# finally, create HuggingFace model and load state dict
__lowerCamelCase = ConditionalDetrForSegmentation(__lowerCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
model.push_to_hub(repo_id=__lowerCAmelCase , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
__lowerCamelCase = conditional_detr(__lowerCAmelCase )
__lowerCamelCase = model(__lowerCAmelCase )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 270 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 270 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __a :
"""simple docstring"""
def __init__( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any]=None , lowercase_ : int=None , lowercase_ : List[Any]=None , lowercase_ : Tuple="resnet50" , lowercase_ : str=3 , lowercase_ : Optional[int]=32 , lowercase_ : str=3 , lowercase_ : Any=True , lowercase_ : int=True , ):
UpperCamelCase__ : Dict =parent
UpperCamelCase__ : Dict =out_indices if out_indices is not None else [4]
UpperCamelCase__ : Union[str, Any] =stage_names
UpperCamelCase__ : int =out_features
UpperCamelCase__ : int =backbone
UpperCamelCase__ : Optional[Any] =batch_size
UpperCamelCase__ : Optional[int] =image_size
UpperCamelCase__ : Any =num_channels
UpperCamelCase__ : List[str] =use_pretrained_backbone
UpperCamelCase__ : int =is_training
def _lowerCAmelCase ( self : int ):
UpperCamelCase__ : Dict =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : List[str] =self.get_config()
return config, pixel_values
def _lowerCAmelCase ( self : Any ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def _lowerCAmelCase ( self : Dict , lowercase_ : Optional[Any] , lowercase_ : List[str] ):
UpperCamelCase__ : str =TimmBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
UpperCamelCase__ : Tuple =model(lowercase_ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def _lowerCAmelCase ( self : Tuple ):
UpperCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs()
UpperCamelCase__ : Optional[Any] =config_and_inputs
UpperCamelCase__ : int ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __a ( snake_case__, snake_case__, snake_case__, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : int =TimmBackboneModelTester(self )
UpperCamelCase__ : str =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def _lowerCAmelCase ( self : Any ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : List[Any] ='''resnet18'''
UpperCamelCase__ : List[Any] ='''microsoft/resnet-18'''
UpperCamelCase__ : Any =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ )
UpperCamelCase__ : List[Any] =AutoBackbone.from_pretrained(lowercase_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
UpperCamelCase__ : Optional[int] =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3] )
UpperCamelCase__ : Any =AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def _lowerCAmelCase ( self : List[Any] ):
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def _lowerCAmelCase ( self : Dict ):
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def _lowerCAmelCase ( self : List[Any] ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _lowerCAmelCase ( self : int ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _lowerCAmelCase ( self : Optional[Any] ):
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def _lowerCAmelCase ( self : Any ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _lowerCAmelCase ( self : Tuple ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _lowerCAmelCase ( self : int ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _lowerCAmelCase ( self : List[str] ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _lowerCAmelCase ( self : Any ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _lowerCAmelCase ( self : Optional[Any] ):
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def _lowerCAmelCase ( self : Union[str, Any] ):
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def _lowerCAmelCase ( self : Tuple ):
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def _lowerCAmelCase ( self : Any ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowerCAmelCase ( self : List[str] ):
pass
def _lowerCAmelCase ( self : str ):
UpperCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] =model_class(lowercase_ )
UpperCamelCase__ : str =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Dict =[*signature.parameters.keys()]
UpperCamelCase__ : Union[str, Any] =['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : List[str] =True
UpperCamelCase__ : Optional[Any] =self.has_attentions
# no need to test all models as different heads yield the same functionality
UpperCamelCase__ : Optional[Any] =self.all_model_classes[0]
UpperCamelCase__ : Optional[Any] =model_class(lowercase_ )
model.to(lowercase_ )
UpperCamelCase__ : List[Any] =self._prepare_for_class(lowercase_ , lowercase_ )
UpperCamelCase__ : List[str] =model(**lowercase_ )
UpperCamelCase__ : Tuple =outputs[0][-1]
# Encoder-/Decoder-only models
UpperCamelCase__ : Optional[Any] =outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
UpperCamelCase__ : Any =outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Dict =model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Optional[Any] =model(**lowercase_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
UpperCamelCase__ : str =copy.deepcopy(lowercase_ )
UpperCamelCase__ : Any =None
UpperCamelCase__ : Optional[Any] =model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Union[str, Any] =model(**lowercase_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
UpperCamelCase__ : Dict =copy.deepcopy(lowercase_ )
UpperCamelCase__ : str =False
UpperCamelCase__ : List[str] =model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Optional[int] =model(**lowercase_ )
| 361 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE_ = 'ChineseCLIPImageProcessor'
SCREAMING_SNAKE_CASE_ = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : Tuple , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , **lowercase_ : Union[str, Any] ):
UpperCamelCase__ : List[str] =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase_ , )
UpperCamelCase__ : List[str] =kwargs.pop('''feature_extractor''' )
UpperCamelCase__ : List[Any] =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase_ , lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.image_processor
def __call__( self : Optional[int] , lowercase_ : int=None , lowercase_ : Optional[int]=None , lowercase_ : int=None , **lowercase_ : Union[str, Any] ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCamelCase__ : Optional[int] =self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if images is not None:
UpperCamelCase__ : str =self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None and images is not None:
UpperCamelCase__ : Optional[int] =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
def _lowerCAmelCase ( self : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def _lowerCAmelCase ( self : str , *lowercase_ : Dict , **lowercase_ : Union[str, Any] ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : List[str] =self.tokenizer.model_input_names
UpperCamelCase__ : List[str] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowerCAmelCase ( self : Any ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase_ , )
return self.image_processor_class
| 157 | 0 |
def _A ( _lowercase ) -> bool:
"""simple docstring"""
__UpperCamelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__UpperCamelCase = set()
return any(
node not in visited and depth_first_search(_lowercase , _lowercase , _lowercase , _lowercase )
for node in graph )
def _A ( _lowercase , _lowercase , _lowercase , _lowercase ) -> bool:
"""simple docstring"""
visited.add(_lowercase )
rec_stk.add(_lowercase )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(_lowercase , _lowercase , _lowercase , _lowercase ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(_lowercase )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 310 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCamelCase (_a ):
_lowercase = 0
_lowercase = False
_lowercase = 3.0
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Any ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs(),{} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs(),{'a': 2} )
self.assertDictEqual(MockClass(a=2,b=A_ ).to_kwargs(),{'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2,c=2.2_5 ).to_kwargs(),{'a': 2, 'c': 2.2_5} )
@require_cuda
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = GradScalerKwargs(init_scale=1024,growth_factor=2 )
AcceleratorState._reset_state()
__UpperCamelCase = Accelerator(mixed_precision='fp16',kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__UpperCamelCase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale,1_0_2_4.0 )
self.assertEqual(scaler._growth_factor,2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor,0.5 )
self.assertEqual(scaler._growth_interval,2000 )
self.assertEqual(scaler._enabled,A_ )
@require_multi_gpu
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_,env=os.environ.copy() )
if __name__ == "__main__":
__snake_case = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
__snake_case = Accelerator(kwargs_handlers=[ddp_scaler])
__snake_case = torch.nn.Linear(1_0_0, 2_0_0)
__snake_case = accelerator.prepare(model)
# Check the values changed in kwargs
__snake_case = ''''''
__snake_case = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 310 | 1 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCamelCase_ ( _a ):
"""simple docstring"""
def wrapper(*_a , **_a ):
lowerCAmelCase__ : List[str] = timeit.default_timer()
lowerCAmelCase__ : List[Any] = func(*_a , **_a )
lowerCAmelCase__ : Any = timeit.default_timer() - starttime
return delta
lowerCAmelCase__ : Any = func.__name__
return wrapper
def lowerCamelCase_ ( _a , _a=100 , _a=None ):
"""simple docstring"""
lowerCAmelCase__ : str = []
lowerCAmelCase__ : str = seq_shapes or {}
for i in range(_a ):
lowerCAmelCase__ : List[str] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(_a , _ArrayXD ):
lowerCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(_a , datasets.Value ):
if v.dtype == "string":
lowerCAmelCase__ : Dict = '''The small grey turtle was surprisingly fast when challenged.'''
else:
lowerCAmelCase__ : Any = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(_a , datasets.Sequence ):
while isinstance(_a , datasets.Sequence ):
lowerCAmelCase__ : Optional[int] = v.feature
lowerCAmelCase__ : str = seq_shapes[k]
lowerCAmelCase__ : Any = np.random.rand(*_a ).astype(v.dtype )
lowerCAmelCase__ : int = data
dummy_data.append((i, example) )
return dummy_data
def lowerCamelCase_ ( _a , _a , _a=100 , _a=None ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = generate_examples(_a , num_examples=_a , seq_shapes=_a )
with ArrowWriter(features=_a , path=_a ) as writer:
for key, record in dummy_data:
lowerCAmelCase__ : Optional[int] = features.encode_example(_a )
writer.write(_a )
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
lowerCAmelCase__ : List[Any] = datasets.Dataset.from_file(filename=_a , info=datasets.DatasetInfo(features=_a ) )
return dataset
| 211 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 211 | 1 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _snake_case :
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
def SCREAMING_SNAKE_CASE__ ( self ):
return self.__class__(**{k: copy.deepcopy(_lowerCamelCase ) for k, v in self.__dict__.items()} )
| 94 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : Union[str, Any] = {
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : List[str] = '''switch_transformers'''
snake_case__ : Optional[int] = ['''past_key_values''']
snake_case__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int]=3_2_1_2_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6_4 , SCREAMING_SNAKE_CASE__ : List[str]=2_0_4_8 , SCREAMING_SNAKE_CASE__ : Dict=6_4 , SCREAMING_SNAKE_CASE__ : List[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : Tuple=1_2 , SCREAMING_SNAKE_CASE__ : Tuple=8 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.01 , SCREAMING_SNAKE_CASE__ : str="float32" , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE__ : Dict=1_2_8 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=1E-6 , SCREAMING_SNAKE_CASE__ : Dict=0.001 , SCREAMING_SNAKE_CASE__ : Any=0.001 , SCREAMING_SNAKE_CASE__ : Optional[int]=1.0 , SCREAMING_SNAKE_CASE__ : Any="relu" , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]:
a_ : Optional[int] = vocab_size
a_ : List[str] = d_model
a_ : Tuple = d_kv
a_ : Optional[Any] = d_ff
a_ : List[Any] = num_sparse_encoder_layers
a_ : Any = num_layers
a_ : str = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a_ : List[Any] = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
a_ : Optional[int] = self.num_layers // self.num_sparse_encoder_layers
else:
a_ : List[Any] = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
a_ : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
a_ : List[str] = self.num_decoder_layers # HACK: this will create 0 sparse layers
a_ : Dict = num_heads
a_ : str = num_experts
a_ : Any = expert_capacity
a_ : List[Any] = router_bias
a_ : str = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
a_ : Optional[int] = router_dtype
a_ : int = router_ignore_padding_tokens
a_ : Any = relative_attention_num_buckets
a_ : List[str] = relative_attention_max_distance
a_ : Optional[Any] = dropout_rate
a_ : Tuple = layer_norm_epsilon
a_ : Dict = initializer_factor
a_ : Any = feed_forward_proj
a_ : Tuple = use_cache
a_ : str = add_router_probs
a_ : Optional[int] = router_z_loss_coef
a_ : List[str] = router_aux_loss_coef
a_ : int = self.feed_forward_proj.split('-' )
a_ : int = act_info[-1]
a_ : Optional[int] = act_info[0] == 'gated'
if len(SCREAMING_SNAKE_CASE__ ) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE__ ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
a_ : Any = 'gelu_new'
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
| 32 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : Union[str, Any] ):
UpperCamelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
UpperCamelCase__ = model.generate(_a , max_new_tokens=10 , do_sample=_a )
UpperCamelCase__ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCamelCase__ = TextStreamer(_a )
model.generate(_a , max_new_tokens=10 , do_sample=_a , streamer=_a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCamelCase__ = cs.out[:-1]
self.assertEqual(_a , _a )
def A_ ( self : str ):
UpperCamelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
UpperCamelCase__ = model.generate(_a , max_new_tokens=10 , do_sample=_a )
UpperCamelCase__ = tokenizer.decode(greedy_ids[0] )
UpperCamelCase__ = TextIteratorStreamer(_a )
UpperCamelCase__ = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
UpperCamelCase__ = Thread(target=model.generate , kwargs=_a )
thread.start()
UpperCamelCase__ = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_a , _a )
def A_ ( self : Optional[Any] ):
UpperCamelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
UpperCamelCase__ = model.generate(_a , max_new_tokens=10 , do_sample=_a )
UpperCamelCase__ = greedy_ids[:, input_ids.shape[1] :]
UpperCamelCase__ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCamelCase__ = TextStreamer(_a , skip_prompt=_a )
model.generate(_a , max_new_tokens=10 , do_sample=_a , streamer=_a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCamelCase__ = cs.out[:-1]
self.assertEqual(_a , _a )
def A_ ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCamelCase__ = AutoTokenizer.from_pretrained('''distilgpt2''' )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_a )
UpperCamelCase__ = -1
UpperCamelCase__ = torch.ones((1, 5) , device=_a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCamelCase__ = TextStreamer(_a , skip_special_tokens=_a )
model.generate(_a , max_new_tokens=1 , do_sample=_a , streamer=_a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCamelCase__ = cs.out[:-1] # Remove the final "\n"
UpperCamelCase__ = tokenizer(_a , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def A_ ( self : Optional[Any] ):
UpperCamelCase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
UpperCamelCase__ = TextIteratorStreamer(_a , timeout=0.001 )
UpperCamelCase__ = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
UpperCamelCase__ = Thread(target=model.generate , kwargs=_a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_a ):
UpperCamelCase__ = ""
for new_text in streamer:
streamer_text += new_text
| 369 | from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowercase = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Dict=None ):
'''simple docstring'''
require_version(deps[pkg], UpperCamelCase__ )
| 35 | 0 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : list[str] | None = None , _lowerCamelCase : dict[str, float] | None = None , _lowerCamelCase : bool = False , ) -> int:
'''simple docstring'''
__UpperCamelCase : Dict = cipher_alphabet or [chr(snake_case_) for i in range(97 , 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
__UpperCamelCase : Optional[Any] = {
"a": 0.0_8_4_9_7,
"b": 0.0_1_4_9_2,
"c": 0.0_2_2_0_2,
"d": 0.0_4_2_5_3,
"e": 0.1_1_1_6_2,
"f": 0.0_2_2_2_8,
"g": 0.0_2_0_1_5,
"h": 0.0_6_0_9_4,
"i": 0.0_7_5_4_6,
"j": 0.0_0_1_5_3,
"k": 0.0_1_2_9_2,
"l": 0.0_4_0_2_5,
"m": 0.0_2_4_0_6,
"n": 0.0_6_7_4_9,
"o": 0.0_7_5_0_7,
"p": 0.0_1_9_2_9,
"q": 0.0_0_0_9_5,
"r": 0.0_7_5_8_7,
"s": 0.0_6_3_2_7,
"t": 0.0_9_3_5_6,
"u": 0.0_2_7_5_8,
"v": 0.0_0_9_7_8,
"w": 0.0_2_5_6_0,
"x": 0.0_0_1_5_0,
"y": 0.0_1_9_9_4,
"z": 0.0_0_0_7_7,
}
else:
# Custom frequencies dictionary
__UpperCamelCase : Any = frequencies_dict
if not case_sensitive:
__UpperCamelCase : int = ciphertext.lower()
# Chi squared statistic values
__UpperCamelCase : Any = {}
# cycle through all of the shifts
for shift in range(len(snake_case_)):
__UpperCamelCase : int = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
__UpperCamelCase : Optional[int] = (alphabet_letters.index(letter.lower()) - shift) % len(
snake_case_)
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
__UpperCamelCase : List[Any] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
__UpperCamelCase : List[Any] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
__UpperCamelCase : str = decrypted_with_shift.lower().count(snake_case_)
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
__UpperCamelCase : Any = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__UpperCamelCase : Dict = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
__UpperCamelCase : Tuple = decrypted_with_shift.count(snake_case_)
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
__UpperCamelCase : List[str] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__UpperCamelCase : List[str] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
__UpperCamelCase : Tuple = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(_lowerCamelCase : int) -> tuple[float, str]:
return chi_squared_statistic_values[key]
__UpperCamelCase : Union[str, Any] = min(
snake_case_ , key=snake_case_ , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) : Tuple = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
) | 232 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( snake_case_ :list[float] , snake_case_ :list[float] ):
__UpperCAmelCase = sorted(numsa + numsa )
__UpperCAmelCase , __UpperCAmelCase = divmod(len(snake_case_ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : int = [float(x) for x in input('Enter the elements of first array: ').split()]
_lowercase : Tuple = [float(x) for x in input('Enter the elements of second array: ').split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 332 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
lowerCAmelCase__ = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCamelCase = "lm_head"
UpperCamelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
UpperCamelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = fairseq_model.state_dict()
UpperCamelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == "group" , )
UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(_SCREAMING_SNAKE_CASE )[0].split("." )[-2]
UpperCamelCase = mapped_key.replace("*" , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
UpperCamelCase = "weight_g"
elif "weight_v" in name:
UpperCamelCase = "weight_v"
elif "bias" in name:
UpperCamelCase = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase = "weight"
else:
UpperCamelCase = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F"Unused weights: {unused_weights}" )
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = full_name.split("conv_layers." )[-1]
UpperCamelCase = name.split("." )
UpperCamelCase = int(items[0] )
UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
UpperCamelCase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
UpperCamelCase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
UpperCamelCase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
UpperCamelCase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True ):
"""simple docstring"""
if config_path is not None:
UpperCamelCase = UniSpeechConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCamelCase = Dictionary.load_from_json(_SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase = target_dict.pad_index
UpperCamelCase = target_dict.bos_index
UpperCamelCase = target_dict.eos_index
UpperCamelCase = len(target_dict.symbols )
UpperCamelCase = os.path.join(_SCREAMING_SNAKE_CASE , "vocab.json" )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase = 42
UpperCamelCase = 43
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = WavaVecaPhonemeCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_SCREAMING_SNAKE_CASE , )
UpperCamelCase = True if config.feat_extract_norm == "layer" else False
UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
UpperCamelCase = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = UniSpeechForCTC(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase = UniSpeechForPreTraining(_SCREAMING_SNAKE_CASE )
if is_finetuned:
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
UpperCamelCase = model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
hf_unispeech.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 244 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = "facebook/bart-large-mnli"
UpperCAmelCase_ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
UpperCAmelCase_ = "text_classifier"
UpperCAmelCase_ = AutoTokenizer
UpperCAmelCase_ = AutoModelForSequenceClassification
UpperCAmelCase_ = ["text", ["text"]]
UpperCAmelCase_ = ["text"]
def snake_case_ (self ) -> List[Any]:
super().setup()
UpperCamelCase = self.model.config
UpperCamelCase = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
UpperCamelCase = int(__a )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def snake_case_ (self , __a , __a ) -> List[Any]:
UpperCamelCase = labels
return self.pre_processor(
[text] * len(__a ) , [F"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , )
def snake_case_ (self , __a ) -> int:
UpperCamelCase = outputs.logits
UpperCamelCase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 244 | 1 |
UpperCamelCase = [
(1000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[int] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
lowercase__ : Optional[Any] = 0
lowercase__ : Any = 0
while place < len(_lowerCamelCase):
if (place + 1 < len(_lowerCamelCase)) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : Union[str, Any] = []
for arabic, roman in ROMAN:
((lowercase__) , (lowercase__)) : Union[str, Any] = divmod(_lowerCamelCase , _lowerCamelCase)
result.append(roman * factor)
if number == 0:
break
return "".join(_lowerCamelCase)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 | '''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : list ) -> list:
if any(not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or x < 0 for x in sequence ):
raise TypeError("""Sequence must be list of non-negative integers""" )
for _ in range(len(UpperCAmelCase__ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(UpperCAmelCase__ , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 239 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a (metaclass=UpperCamelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = ["""speech"""]
def __init__( self : Tuple , *lowerCamelCase : Optional[Any] , **lowerCamelCase : List[str] ) -> int:
requires_backends(self , ["speech"] )
class a (metaclass=UpperCamelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = ["""speech"""]
def __init__( self : List[str] , *lowerCamelCase : int , **lowerCamelCase : Optional[int] ) -> Tuple:
requires_backends(self , ["speech"] )
| 368 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_snake_case : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(_lowerCAmelCase )
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Tuple , *lowerCamelCase : Any , **lowerCamelCase : Tuple ) -> int:
super().__init__(*lowerCamelCase , **lowerCamelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def __snake_case ( self : List[str] , lowerCamelCase : Optional[Any]=None ) -> Optional[int]:
__snake_case : Optional[Any] = {}
if top_k is not None:
__snake_case : List[Any] = top_k
return {}, {}, postprocess_params
def __call__( self : List[Any] , lowerCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase : Dict ) -> Optional[int]:
return super().__call__(lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : Optional[Any] , lowerCamelCase : List[Any] ) -> int:
__snake_case : Any = load_image(lowerCamelCase )
__snake_case : str = self.image_processor(images=lowerCamelCase , return_tensors=self.framework )
return model_inputs
def __snake_case ( self : int , lowerCamelCase : List[str] ) -> Tuple:
__snake_case : List[Any] = self.model(**lowerCamelCase )
return model_outputs
def __snake_case ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : Optional[Any]=5 ) -> List[str]:
if top_k > self.model.config.num_labels:
__snake_case : int = self.model.config.num_labels
if self.framework == "pt":
__snake_case : Optional[Any] = model_outputs.logits.softmax(-1 )[0]
__snake_case , __snake_case : List[str] = probs.topk(lowerCamelCase )
elif self.framework == "tf":
__snake_case : Tuple = stable_softmax(model_outputs.logits , axis=-1 )[0]
__snake_case : Optional[Any] = tf.math.top_k(lowerCamelCase , k=lowerCamelCase )
__snake_case , __snake_case : List[Any] = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__snake_case : Any = scores.tolist()
__snake_case : Optional[int] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase , lowerCamelCase )]
| 134 | 0 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
lowerCAmelCase__ = 100
lowerCAmelCase__ = set(range(3, NUM_PRIMES, 2))
primes.add(2)
lowerCAmelCase__ = 42
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0 )
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
lowerCAmelCase : set[int] = set()
lowerCAmelCase : int
lowerCAmelCase : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def a__ ( SCREAMING_SNAKE_CASE : int = 5_0_0_0 ):
'''simple docstring'''
for number_to_partition in range(1 , SCREAMING_SNAKE_CASE ):
if len(partition(SCREAMING_SNAKE_CASE ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"{solution() = }")
| 108 |
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ = "" , snake_case__ = False ):
"""simple docstring"""
lowerCAmelCase : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase : str = is_leaf
lowerCAmelCase : str = prefix
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = 0
for q, w in zip(self.prefix , snake_case__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
for word in words:
self.insert(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if self.prefix == word:
lowerCAmelCase : Union[str, Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowerCAmelCase : Optional[Any] = RadixNode(prefix=snake_case__ , is_leaf=snake_case__ )
else:
lowerCAmelCase : Tuple = self.nodes[word[0]]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = incoming_node.match(
snake_case__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(snake_case__ )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowerCAmelCase : Optional[Any] = remaining_prefix
lowerCAmelCase : int = self.nodes[matching_string[0]]
lowerCAmelCase : List[Any] = RadixNode(snake_case__ , snake_case__ )
lowerCAmelCase : Optional[int] = aux_node
if remaining_word == "":
lowerCAmelCase : Optional[int] = True
else:
self.nodes[matching_string[0]].insert(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : str = self.nodes.get(word[0] , snake_case__ )
if not incoming_node:
return False
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = incoming_node.match(
snake_case__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = self.nodes.get(word[0] , snake_case__ )
if not incoming_node:
return False
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = incoming_node.match(
snake_case__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(snake_case__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCAmelCase : List[str] = list(self.nodes.values() )[0]
lowerCAmelCase : List[str] = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCAmelCase : Optional[int] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCAmelCase : Optional[int] = False
# If there is 1 edge, we merge it with its child
else:
lowerCAmelCase : Optional[Any] = list(incoming_node.nodes.values() )[0]
lowerCAmelCase : int = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCAmelCase : Tuple = merging_node.nodes
return True
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if self.prefix != "":
print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = "banana bananas bandana band apple all beast".split()
lowerCAmelCase : List[str] = RadixNode()
root.insert_many(SCREAMING_SNAKE_CASE )
assert all(root.find(SCREAMING_SNAKE_CASE ) for word in words )
assert not root.find("bandanas" )
assert not root.find("apps" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def a__ ( ):
'''simple docstring'''
assert test_trie()
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Dict = RadixNode()
lowerCAmelCase : Optional[Any] = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(SCREAMING_SNAKE_CASE )
print("Words:" , SCREAMING_SNAKE_CASE )
print("Tree:" )
root.print_tree()
if __name__ == "__main__":
main()
| 108 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __lowercase ( _UpperCamelCase, _UpperCamelCase=0.9_9_9, _UpperCamelCase="cosine", ) ->Tuple:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCamelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCamelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
lowercase : List[str] = []
for i in range(_UpperCamelCase ):
lowercase : List[str] = i / num_diffusion_timesteps
lowercase : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCamelCase ) / alpha_bar_fn(_UpperCamelCase ), _UpperCamelCase ) )
return torch.tensor(_UpperCamelCase, dtype=torch.floataa )
class __SCREAMING_SNAKE_CASE ( A__ , A__ ):
A : Any = [e.name for e in KarrasDiffusionSchedulers]
A : Dict = 2
@register_to_config
def __init__( self , SCREAMING_SNAKE_CASE__ = 1000 , SCREAMING_SNAKE_CASE__ = 0.00085 , SCREAMING_SNAKE_CASE__ = 0.012 , SCREAMING_SNAKE_CASE__ = "linear" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "epsilon" , SCREAMING_SNAKE_CASE__ = "linspace" , SCREAMING_SNAKE_CASE__ = 0 , ):
if trained_betas is not None:
lowercase : str = torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=torch.floataa )
elif beta_schedule == "linear":
lowercase : Union[str, Any] = torch.linspace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase : Union[str, Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase : str = betas_for_alpha_bar(SCREAMING_SNAKE_CASE__ )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
lowercase : Optional[int] = 1.0 - self.betas
lowercase : Union[str, Any] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
if schedule_timesteps is None:
lowercase : Union[str, Any] = self.timesteps
lowercase : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowercase : List[Any] = 1 if len(SCREAMING_SNAKE_CASE__ ) > 1 else 0
else:
lowercase : int = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE__ ) else timestep
lowercase : Optional[Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCamelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
lowercase : Optional[Any] = self.index_for_timestep(SCREAMING_SNAKE_CASE__ )
if self.state_in_first_order:
lowercase : Any = self.sigmas[step_index]
else:
lowercase : Optional[int] = self.sigmas_interpol[step_index]
lowercase : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
lowercase : Any = num_inference_steps
lowercase : Optional[Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowercase : Dict = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowercase : int = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase : Union[str, Any] = (np.arange(0 , SCREAMING_SNAKE_CASE__ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowercase : str = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase : Dict = (np.arange(SCREAMING_SNAKE_CASE__ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE__ )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
lowercase : int = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowercase : Optional[int] = torch.from_numpy(np.log(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = np.interp(SCREAMING_SNAKE_CASE__ , np.arange(0 , len(SCREAMING_SNAKE_CASE__ ) ) , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowercase : str = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(device=SCREAMING_SNAKE_CASE__ )
# interpolate sigmas
lowercase : int = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
lowercase : Optional[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
lowercase : Optional[int] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ):
# mps does not support float64
lowercase : Any = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ , dtype=torch.floataa )
else:
lowercase : List[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
# interpolate timesteps
lowercase : Any = self.sigma_to_t(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ , dtype=timesteps.dtype )
lowercase : List[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
lowercase : Dict = torch.cat([timesteps[:1], interleaved_timesteps] )
lowercase : int = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowercase : Dict = defaultdict(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
# get log sigma
lowercase : Any = sigma.log()
# get distribution
lowercase : Optional[Any] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
lowercase : List[Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
lowercase : str = low_idx + 1
lowercase : Union[str, Any] = self.log_sigmas[low_idx]
lowercase : Union[str, Any] = self.log_sigmas[high_idx]
# interpolate sigmas
lowercase : Dict = (low - log_sigma) / (low - high)
lowercase : Union[str, Any] = w.clamp(0 , 1 )
# transform interpolation to time range
lowercase : List[str] = (1 - w) * low_idx + w * high_idx
lowercase : Tuple = t.view(sigma.shape )
return t
@property
def __lowerCamelCase ( self ):
return self.sample is None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = True , ):
lowercase : Optional[Any] = self.index_for_timestep(SCREAMING_SNAKE_CASE__ )
# advance index counter by 1
lowercase : Dict = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowercase : Union[str, Any] = self.sigmas[step_index]
lowercase : List[Any] = self.sigmas_interpol[step_index + 1]
lowercase : Any = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
lowercase : Any = self.sigmas[step_index - 1]
lowercase : List[Any] = self.sigmas_interpol[step_index]
lowercase : Optional[int] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowercase : Union[str, Any] = 0
lowercase : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowercase : List[Any] = sigma_hat if self.state_in_first_order else sigma_interpol
lowercase : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowercase : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol
lowercase : Any = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowercase : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowercase : Union[str, Any] = sigma_interpol - sigma_hat
# store for 2nd order step
lowercase : Optional[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
lowercase : str = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
lowercase : str = sigma_next - sigma_hat
lowercase : List[str] = self.sample
lowercase : Optional[int] = None
lowercase : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase : int = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE__ ):
# mps does not support float64
lowercase : Tuple = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowercase : List[Any] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowercase : List[str] = self.timesteps.to(original_samples.device )
lowercase : Any = timesteps.to(original_samples.device )
lowercase : Tuple = [self.index_for_timestep(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for t in timesteps]
lowercase : Union[str, Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowercase : Any = sigma.unsqueeze(-1 )
lowercase : Optional[Any] = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 369 |
def __lowercase ( _UpperCamelCase = 50 ) ->int:
"""simple docstring"""
lowercase : str = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2, 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 173 | 0 |
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 , 0 , -1 ):
lowerCamelCase : Tuple = False
for j in range(SCREAMING_SNAKE_CASE_ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowerCamelCase , lowerCamelCase : int = unsorted[j - 1], unsorted[j]
lowerCamelCase : Optional[int] = True
for j in range(SCREAMING_SNAKE_CASE_ ):
if unsorted[j] > unsorted[j + 1]:
lowerCamelCase , lowerCamelCase : Union[str, Any] = unsorted[j + 1], unsorted[j]
lowerCamelCase : Optional[Any] = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = input('''Enter numbers separated by a comma:\n''').strip()
_snake_case = [int(item) for item in user_input.split(''',''')]
print(f'''{cocktail_shaker_sort(unsorted) = }''')
| 283 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : Tuple = StableUnCLIPPipeline
__A : Optional[int] = TEXT_TO_IMAGE_PARAMS
__A : str = TEXT_TO_IMAGE_BATCH_PARAMS
__A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__A : Union[str, Any] = False
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = 32
lowerCamelCase : Dict = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__A , projection_dim=__A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase : List[Any] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__A , num_layers=1 , )
torch.manual_seed(0 )
lowerCamelCase : Dict = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=__A , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = StableUnCLIPImageNormalizer(embedding_dim=__A )
lowerCamelCase : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowerCamelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__A , layers_per_block=1 , upcast_attention=__A , use_linear_projection=__A , )
torch.manual_seed(0 )
lowerCamelCase : int = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=__A , steps_offset=1 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = AutoencoderKL()
lowerCamelCase : Optional[int] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def _snake_case ( self , __A , __A=0 ):
"""simple docstring"""
if str(__A ).startswith("mps" ):
lowerCamelCase : Optional[int] = torch.manual_seed(__A )
else:
lowerCamelCase : Optional[Any] = torch.Generator(device=__A ).manual_seed(__A )
lowerCamelCase : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=__A )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowerCamelCase : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase : Dict = pipe("anime turle" , generator=__A , output_type="np" )
lowerCamelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__A , __A )
def _snake_case ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowerCamelCase : Union[str, Any] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase : Any = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 283 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__magic_name__: Dict = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: int = ["""BeitFeatureExtractor"""]
__magic_name__: Any = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Any = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: List[str] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
__magic_name__: List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 352 |
__magic_name__: List[Any] = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__magic_name__: Dict = [{"type": "code", "content": INSTALL_CONTENT}]
__magic_name__: Optional[Any] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 138 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__A : Union[str, Any] = logging.get_logger(__name__)
# General docstring
__A : Tuple = '''MobileNetV1Config'''
# Base docstring
__A : Union[str, Any] = '''google/mobilenet_v1_1.0_224'''
__A : Union[str, Any] = [1, 1_024, 7, 7]
# Image classification docstring
__A : Optional[Any] = '''google/mobilenet_v1_1.0_224'''
__A : List[Any] = '''tabby, tabby cat'''
__A : Union[str, Any] = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowercase ( __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Dict=None ):
lowercase_ : str = {}
if isinstance(__snake_case , __snake_case ):
lowercase_ : Union[str, Any] = model.mobilenet_va
else:
lowercase_ : Optional[Any] = model
lowercase_ : Union[str, Any] = '''MobilenetV1/Conv2d_0/'''
lowercase_ : Union[str, Any] = backbone.conv_stem.convolution.weight
lowercase_ : Optional[Any] = backbone.conv_stem.normalization.bias
lowercase_ : Union[str, Any] = backbone.conv_stem.normalization.weight
lowercase_ : Any = backbone.conv_stem.normalization.running_mean
lowercase_ : int = backbone.conv_stem.normalization.running_var
for i in range(1_3 ):
lowercase_ : Optional[int] = i + 1
lowercase_ : Union[str, Any] = i * 2
lowercase_ : Optional[Any] = backbone.layer[pt_index]
lowercase_ : Union[str, Any] = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
lowercase_ : str = pointer.convolution.weight
lowercase_ : int = pointer.normalization.bias
lowercase_ : Any = pointer.normalization.weight
lowercase_ : Dict = pointer.normalization.running_mean
lowercase_ : Union[str, Any] = pointer.normalization.running_var
lowercase_ : Any = backbone.layer[pt_index + 1]
lowercase_ : Union[str, Any] = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
lowercase_ : int = pointer.convolution.weight
lowercase_ : str = pointer.normalization.bias
lowercase_ : Tuple = pointer.normalization.weight
lowercase_ : Dict = pointer.normalization.running_mean
lowercase_ : Any = pointer.normalization.running_var
if isinstance(__snake_case , __snake_case ):
lowercase_ : Optional[Any] = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
lowercase_ : Any = model.classifier.weight
lowercase_ : Optional[int] = model.classifier.bias
return tf_to_pt_map
def lowercase ( __snake_case : Optional[int] , __snake_case : int , __snake_case : Dict ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''' )
raise
# Load weights from TF model
lowercase_ : Tuple = tf.train.list_variables(__snake_case )
lowercase_ : int = {}
for name, shape in init_vars:
logger.info(F'''Loading TF weight {name} with shape {shape}''' )
lowercase_ : Optional[Any] = tf.train.load_variable(__snake_case , __snake_case )
lowercase_ : Optional[int] = array
# Build TF to PyTorch weights loading map
lowercase_ : Any = _build_tf_to_pytorch_map(__snake_case , __snake_case , __snake_case )
for name, pointer in tf_to_pt_map.items():
logger.info(F'''Importing {name}''' )
if name not in tf_weights:
logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
continue
lowercase_ : Union[str, Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''' )
lowercase_ : Any = np.transpose(__snake_case , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('''Transposing''' )
if len(pointer.shape ) == 2: # copying into linear layer
lowercase_ : Optional[int] = array.squeeze().transpose()
else:
lowercase_ : Optional[int] = np.transpose(__snake_case , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
lowercase_ : str = torch.from_numpy(__snake_case )
tf_weights.pop(__snake_case , __snake_case )
tf_weights.pop(name + '''/RMSProp''' , __snake_case )
tf_weights.pop(name + '''/RMSProp_1''' , __snake_case )
tf_weights.pop(name + '''/ExponentialMovingAverage''' , __snake_case )
logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
return model
def lowercase ( __snake_case : torch.Tensor , __snake_case : nn.Convad ):
lowercase_ , lowercase_ : Optional[int] = features.shape[-2:]
lowercase_ , lowercase_ : str = conv_layer.stride
lowercase_ , lowercase_ : Tuple = conv_layer.kernel_size
if in_height % stride_height == 0:
lowercase_ : Dict = max(kernel_height - stride_height , 0 )
else:
lowercase_ : List[Any] = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
lowercase_ : str = max(kernel_width - stride_width , 0 )
else:
lowercase_ : int = max(kernel_width - (in_width % stride_width) , 0 )
lowercase_ : int = pad_along_width // 2
lowercase_ : Union[str, Any] = pad_along_width - pad_left
lowercase_ : Tuple = pad_along_height // 2
lowercase_ : List[str] = pad_along_height - pad_top
lowercase_ : str = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(__snake_case , __snake_case , '''constant''' , 0.0 )
class _UpperCAmelCase ( nn.Module ):
def __init__( self : List[Any] , A : MobileNetVaConfig , A : int , A : int , A : int , A : Optional[int] = 1 , A : Optional[int] = 1 , A : bool = False , A : Optional[bool] = True , A : Optional[bool or str] = True , ) -> None:
super().__init__()
lowercase_ : int = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
lowercase_ : Tuple = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
lowercase_ : int = nn.Convad(
in_channels=A , out_channels=A , kernel_size=A , stride=A , padding=A , groups=A , bias=A , padding_mode='''zeros''' , )
if use_normalization:
lowercase_ : Optional[Any] = nn.BatchNormad(
num_features=A , eps=config.layer_norm_eps , momentum=0.9997 , affine=A , track_running_stats=A , )
else:
lowercase_ : Union[str, Any] = None
if use_activation:
if isinstance(A , A ):
lowercase_ : str = ACTaFN[use_activation]
elif isinstance(config.hidden_act , A ):
lowercase_ : Any = ACTaFN[config.hidden_act]
else:
lowercase_ : Tuple = config.hidden_act
else:
lowercase_ : Tuple = None
def A ( self : str , A : torch.Tensor ) -> torch.Tensor:
if self.config.tf_padding:
lowercase_ : List[Any] = apply_tf_padding(A , self.convolution )
lowercase_ : Optional[int] = self.convolution(A )
if self.normalization is not None:
lowercase_ : Union[str, Any] = self.normalization(A )
if self.activation is not None:
lowercase_ : Optional[int] = self.activation(A )
return features
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Optional[int] = MobileNetVaConfig
SCREAMING_SNAKE_CASE_ : int = load_tf_weights_in_mobilenet_va
SCREAMING_SNAKE_CASE_ : Optional[Any] = "mobilenet_v1"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "pixel_values"
SCREAMING_SNAKE_CASE_ : List[str] = False
def A ( self : Any , A : Union[nn.Linear, nn.Convad] ) -> None:
if isinstance(A , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(A , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__A : Union[str, Any] = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__A : List[str] = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , _A , )
class _UpperCAmelCase ( _A ):
def __init__( self : str , A : MobileNetVaConfig , A : bool = True ) -> int:
super().__init__(A )
lowercase_ : Union[str, Any] = config
lowercase_ : List[str] = 32
lowercase_ : str = max(int(depth * config.depth_multiplier ) , config.min_depth )
lowercase_ : Union[str, Any] = MobileNetVaConvLayer(
A , in_channels=config.num_channels , out_channels=A , kernel_size=3 , stride=2 , )
lowercase_ : Optional[Any] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
lowercase_ : List[Any] = nn.ModuleList()
for i in range(13 ):
lowercase_ : Dict = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
lowercase_ : str = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
A , in_channels=A , out_channels=A , kernel_size=3 , stride=strides[i] , groups=A , ) )
self.layer.append(
MobileNetVaConvLayer(
A , in_channels=A , out_channels=A , kernel_size=1 , ) )
lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def A ( self : Any , A : Optional[Any] ) -> Optional[int]:
raise NotImplementedError
@add_start_docstrings_to_model_forward(A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : List[Any] , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
lowercase_ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
lowercase_ : List[str] = self.conv_stem(A )
lowercase_ : Dict = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
lowercase_ : Optional[int] = layer_module(A )
if output_hidden_states:
lowercase_ : str = all_hidden_states + (hidden_states,)
lowercase_ : Tuple = hidden_states
if self.pooler is not None:
lowercase_ : Dict = torch.flatten(self.pooler(A ) , start_dim=1 )
else:
lowercase_ : Optional[Any] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A , pooler_output=A , hidden_states=A , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _A , )
class _UpperCAmelCase ( _A ):
def __init__( self : List[str] , A : MobileNetVaConfig ) -> None:
super().__init__(A )
lowercase_ : int = config.num_labels
lowercase_ : List[str] = MobileNetVaModel(A )
lowercase_ : Union[str, Any] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
lowercase_ : Tuple = nn.Dropout(config.classifier_dropout_prob , inplace=A )
lowercase_ : int = nn.Linear(A , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : Optional[Any] , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : List[Any] = self.mobilenet_va(A , output_hidden_states=A , return_dict=A )
lowercase_ : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
lowercase_ : Dict = self.classifier(self.dropout(A ) )
lowercase_ : int = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase_ : List[str] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase_ : Optional[Any] = '''single_label_classification'''
else:
lowercase_ : Tuple = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowercase_ : str = MSELoss()
if self.num_labels == 1:
lowercase_ : List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase_ : List[str] = loss_fct(A , A )
elif self.config.problem_type == "single_label_classification":
lowercase_ : List[Any] = CrossEntropyLoss()
lowercase_ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase_ : str = BCEWithLogitsLoss()
lowercase_ : List[Any] = loss_fct(A , A )
if not return_dict:
lowercase_ : Tuple = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=A , logits=A , hidden_states=outputs.hidden_states , )
| 33 |
def a__ ( _UpperCamelCase : int ):
__lowerCamelCase = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 330 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
__lowerCAmelCase : Union[str, Any] = (low + high) // 2
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = max_subarray(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = max_subarray(_UpperCamelCase , mid + 1 , _UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = max_cross_sum(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = float('-inf' ), -1
__lowerCAmelCase , __lowerCAmelCase : List[str] = float('-inf' ), -1
__lowerCAmelCase : int | float = 0
for i in range(_UpperCamelCase , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__lowerCAmelCase : Union[str, Any] = summ
__lowerCAmelCase : Any = i
__lowerCAmelCase : Tuple = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__lowerCAmelCase : str = summ
__lowerCAmelCase : int = i
return max_left, max_right, (left_sum + right_sum)
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = [randint(1 , _UpperCamelCase ) for _ in range(_UpperCamelCase )]
__lowerCAmelCase : List[Any] = time.time()
max_subarray(_UpperCamelCase , 0 , input_size - 1 )
__lowerCAmelCase : Tuple = time.time()
return end - start
def __lowerCAmelCase ():
__lowerCAmelCase : str = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
__lowerCAmelCase : int = [time_max_subarray(_UpperCamelCase ) for input_size in input_sizes]
print('No of Inputs\t\tTime Taken' )
for input_size, runtime in zip(_UpperCamelCase , _UpperCamelCase ):
print(_UpperCamelCase , '\t\t' , _UpperCamelCase )
plt.plot(_UpperCamelCase , _UpperCamelCase )
plt.xlabel('Number of Inputs' )
plt.ylabel('Time taken in seconds' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod() | 182 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {}
lowerCamelCase__ = {}
lowerCamelCase__ = {}
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , ):
__lowerCAmelCase : Union[str, Any] = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
__lowerCAmelCase : str = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
__lowerCAmelCase : Any = format_type
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None ):
__lowerCAmelCase : int = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
__lowerCAmelCase : str = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
lowerCamelCase__ = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
lowerCamelCase__ = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
lowerCamelCase__ = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def __lowerCAmelCase (_UpperCamelCase ):
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def __lowerCAmelCase (_UpperCamelCase , **_UpperCamelCase ):
__lowerCAmelCase : Any = get_format_type_from_alias(_UpperCamelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**_UpperCamelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" ) | 182 | 1 |
def a_ ( _A ) -> Optional[Any]:
"""simple docstring"""
if len(_A ) < 2:
return collection
def circle_sort_util(_A , _A , _A ) -> bool:
snake_case__ = False
if low == high:
return swapped
snake_case__ = low
snake_case__ = high
while left < right:
if collection[left] > collection[right]:
snake_case__ , snake_case__ = (
collection[right],
collection[left],
)
snake_case__ = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
snake_case__ , snake_case__ = (
collection[right + 1],
collection[left],
)
snake_case__ = True
snake_case__ = low + int((high - low) / 2 )
snake_case__ = circle_sort_util(_A , _A , _A )
snake_case__ = circle_sort_util(_A , mid + 1 , _A )
return swapped or left_swap or right_swap
snake_case__ = True
while is_not_sorted is True:
snake_case__ = circle_sort_util(_A , 0 , len(_A ) - 1 )
return collection
if __name__ == "__main__":
__UpperCamelCase : List[Any] = input("""Enter numbers separated by a comma:\n""").strip()
__UpperCamelCase : int = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
| 307 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=UpperCamelCase__ ):
_lowercase : Any = ['''torch''', '''scipy''']
def __init__( self: int , *UpperCamelCase_: Any , **UpperCamelCase_: Optional[Any] ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def lowerCamelCase_ ( cls: Optional[int] , *UpperCamelCase_: Any , **UpperCamelCase_: List[Any] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def lowerCamelCase_ ( cls: Any , *UpperCamelCase_: Any , **UpperCamelCase_: Dict ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
| 110 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def snake_case ( snake_case__ :str) -> Optional[Any]:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_A = k.replace(snake_case__ , snake_case__)
if k.startswith("""encoder"""):
_A = k.replace(""".attn""" , """.self_attn""")
_A = k.replace("""norm1""" , """self_attn_layer_norm""")
_A = k.replace("""norm2""" , """final_layer_norm""")
elif k.startswith("""decoder"""):
_A = k.replace("""norm1""" , """self_attn_layer_norm""")
_A = k.replace("""norm2""" , """encoder_attn_layer_norm""")
_A = k.replace("""norm3""" , """final_layer_norm""")
return k
def snake_case ( snake_case__ :Optional[Any]) -> str:
_A = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
_A = sd.pop(snake_case__)
_A = k.replace("""layernorm_embedding""" , """layer_norm""")
assert new_k not in sd
_A = v
_SCREAMING_SNAKE_CASE = ['START']
@torch.no_grad()
def snake_case ( snake_case__ :int , snake_case__ :List[Any] , snake_case__ :Tuple) -> List[Any]:
_A = torch.load(snake_case__ , map_location="""cpu""")
_A = model["""model"""]
_A = BlenderbotConfig.from_json_file(snake_case__)
_A = BlenderbotForConditionalGeneration(snake_case__)
_A = m.model.state_dict().keys()
_A = []
_A = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_A = rename_state_dict_key(snake_case__)
if new_k not in valid_keys:
failures.append([k, new_k])
else:
_A = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(snake_case__)
m.model.load_state_dict(snake_case__ , strict=snake_case__)
m.half()
m.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 81 | import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :int = (UnCLIPScheduler,)
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> List[Any]:
_A = {
"""num_train_timesteps""": 10_00,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**lowerCAmelCase_ )
return config
def UpperCAmelCase ( self ) -> Union[str, Any]:
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[Any]:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowerCAmelCase_ , prev_timestep=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(variance_type="""fixed_small_log""" )
_A = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.999_4987 ) ) < 1E-5
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(variance_type="""learned_range""" )
_A = scheduler_class(**lowerCAmelCase_ )
_A = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowerCAmelCase_ ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(4_87 , predicted_variance=lowerCAmelCase_ ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(9_99 , predicted_variance=lowerCAmelCase_ ) - -0.001_0011 < 1E-5
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**lowerCAmelCase_ )
_A = scheduler.timesteps
_A = self.dummy_model()
_A = self.dummy_sample_deter
_A = torch.manual_seed(0 )
for i, t in enumerate(lowerCAmelCase_ ):
# 1. predict noise residual
_A = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A = pred_prev_sample
_A = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**lowerCAmelCase_ )
scheduler.set_timesteps(25 )
_A = scheduler.timesteps
_A = self.dummy_model()
_A = self.dummy_sample_deter
_A = torch.manual_seed(0 )
for i, t in enumerate(lowerCAmelCase_ ):
# 1. predict noise residual
_A = model(lowerCAmelCase_ , lowerCAmelCase_ )
if i + 1 == timesteps.shape[0]:
_A = None
else:
_A = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_A = scheduler.step(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , prev_timestep=lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A = pred_prev_sample
_A = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def UpperCAmelCase ( self ) -> Dict:
pass
def UpperCAmelCase ( self ) -> List[Any]:
pass
| 81 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_A : int ={
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple =['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[str] =[
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
_A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__a = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
for attribute in key.split('''.''' ):
UpperCAmelCase_ : int = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
UpperCAmelCase_ : int = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
UpperCAmelCase_ : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCAmelCase_ : int = value
elif weight_type == "weight_g":
UpperCAmelCase_ : Dict = value
elif weight_type == "weight_v":
UpperCAmelCase_ : List[str] = value
elif weight_type == "bias":
UpperCAmelCase_ : int = value
elif weight_type == "running_mean":
UpperCAmelCase_ : Union[str, Any] = value
elif weight_type == "running_var":
UpperCAmelCase_ : int = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase_ : Optional[Any] = value
elif weight_type == "inv_freq":
UpperCAmelCase_ : List[Any] = value
else:
UpperCAmelCase_ : Optional[int] = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : str = []
UpperCAmelCase_ : int = fairseq_model.state_dict()
UpperCAmelCase_ : Tuple = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase_ : str = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase_ : Any = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase_ : Optional[int] = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCAmelCase_ : Any = True
if "*" in mapped_key:
UpperCAmelCase_ : Tuple = name.split(_lowerCAmelCase )[0].split('''.''' )[-2]
UpperCAmelCase_ : Union[str, Any] = mapped_key.replace('''*''' , _lowerCAmelCase )
if "pos_bias_u" in name:
UpperCAmelCase_ : Optional[Any] = None
elif "pos_bias_v" in name:
UpperCAmelCase_ : int = None
elif "weight_g" in name:
UpperCAmelCase_ : Optional[Any] = "weight_g"
elif "weight_v" in name:
UpperCAmelCase_ : Tuple = "weight_v"
elif "bias" in name:
UpperCAmelCase_ : List[str] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase_ : List[Any] = "weight"
elif "running_mean" in name:
UpperCAmelCase_ : Any = "running_mean"
elif "inv_freq" in name:
UpperCAmelCase_ : List[str] = "inv_freq"
elif "running_var" in name:
UpperCAmelCase_ : List[str] = "running_var"
elif "num_batches_tracked" in name:
UpperCAmelCase_ : Any = "num_batches_tracked"
else:
UpperCAmelCase_ : Tuple = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase_ : Dict = name.split('''.''' )
UpperCAmelCase_ : str = int(items[0] )
UpperCAmelCase_ : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCAmelCase_ : Optional[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCAmelCase_ : Dict = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCAmelCase_ : List[str] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCAmelCase_ : Any = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=True ):
'''simple docstring'''
if config_path is not None:
UpperCAmelCase_ : int = WavaVecaConformerConfig.from_pretrained(_lowerCAmelCase , hidden_act='''swish''' )
else:
UpperCAmelCase_ : int = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
UpperCAmelCase_ : str = "rotary"
if is_finetuned:
if dict_path:
UpperCAmelCase_ : Optional[int] = Dictionary.load(_lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase_ : Optional[int] = target_dict.pad_index
UpperCAmelCase_ : Union[str, Any] = target_dict.bos_index
UpperCAmelCase_ : Any = target_dict.eos_index
UpperCAmelCase_ : str = len(target_dict.symbols )
UpperCAmelCase_ : List[str] = os.path.join(_lowerCAmelCase , '''vocab.json''' )
if not os.path.isdir(_lowerCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_lowerCAmelCase ) )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
UpperCAmelCase_ : List[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : List[Any] = 1
with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ : Tuple = WavaVecaCTCTokenizer(
_lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_lowerCAmelCase , )
UpperCAmelCase_ : str = True if config.feat_extract_norm == "layer" else False
UpperCAmelCase_ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
UpperCAmelCase_ : Tuple = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ : List[str] = WavaVecaConformerForCTC(_lowerCAmelCase )
else:
UpperCAmelCase_ : Any = WavaVecaConformerForPreTraining(_lowerCAmelCase )
if is_finetuned:
UpperCAmelCase_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
UpperCAmelCase_ : Optional[int] = argparse.Namespace(task='''audio_pretraining''' )
UpperCAmelCase_ : Dict = fairseq.tasks.setup_task(_lowerCAmelCase )
UpperCAmelCase_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCAmelCase )
UpperCAmelCase_ : Optional[Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__a = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 354 |
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __a( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=13 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=5 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=37 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=3 ,) -> Dict:
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : Any = is_training
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : List[str] = type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ : Dict = (image_size // patch_size) ** 2
UpperCAmelCase_ : List[str] = num_patches + 1
def a__ ( self ) -> str:
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Tuple = None
if self.use_labels:
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = BeitConfig(
vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_SCREAMING_SNAKE_CASE ,initializer_range=self.initializer_range ,)
return config, pixel_values, labels
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = FlaxBeitModel(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = FlaxBeitForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ : Dict = self.type_sequence_label_size
UpperCAmelCase_ : int = FlaxBeitForImageClassification(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : Any = 1
UpperCAmelCase_ : List[Any] = FlaxBeitForImageClassification(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
),
) : List[str] = config_and_inputs
UpperCAmelCase_ : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def a__ ( self ) -> None:
UpperCAmelCase_ : List[Any] = FlaxBeitModelTester(self )
UpperCAmelCase_ : List[str] = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,has_text_modality=_SCREAMING_SNAKE_CASE ,hidden_size=37 )
def a__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def a__ ( self ) -> List[Any]:
UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Optional[Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_, UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : List[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
@jax.jit
def model_jitted(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ):
return model(pixel_values=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase_ : Dict = model_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase_ : List[str] = model_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,len(_SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape ,output.shape )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def a__ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
UpperCAmelCase_ : Optional[int] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@require_flax
class __a( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a__ ( self ) -> Dict:
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Tuple = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
UpperCAmelCase_ : List[Any] = self.default_image_processor
UpperCAmelCase_ : Optional[Any] = prepare_img()
UpperCAmelCase_ : Optional[Any] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ).pixel_values
# prepare bool_masked_pos
UpperCAmelCase_ : Union[str, Any] = np.ones((1, 196) ,dtype=_SCREAMING_SNAKE_CASE )
# forward pass
UpperCAmelCase_ : Optional[int] = model(pixel_values=_SCREAMING_SNAKE_CASE ,bool_masked_pos=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = outputs.logits
# verify the logits
UpperCAmelCase_ : List[str] = (1, 196, 8_192)
self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = np.array(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-2 ) )
@slow
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Any = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
UpperCAmelCase_ : Any = self.default_image_processor
UpperCAmelCase_ : Any = prepare_img()
UpperCAmelCase_ : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
# forward pass
UpperCAmelCase_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase_ : Dict = (1, 1_000)
self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = np.array([-1.23_85, -1.09_87, -1.01_08] )
self.assertTrue(np.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
UpperCAmelCase_ : Dict = 281
self.assertEqual(logits.argmax(-1 ).item() ,_SCREAMING_SNAKE_CASE )
@slow
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : str = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : Any = prepare_img()
UpperCAmelCase_ : Dict = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
# forward pass
UpperCAmelCase_ : Dict = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase_ : Union[str, Any] = (1, 21_841)
self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = np.array([1.68_81, -0.27_87, 0.59_01] )
self.assertTrue(np.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
UpperCAmelCase_ : Dict = 2_396
self.assertEqual(logits.argmax(-1 ).item() ,_SCREAMING_SNAKE_CASE ) | 235 | 0 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
a_ = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Any = test_results.split(" ")
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[Any] = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
SCREAMING_SNAKE_CASE : Optional[Any] = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(UpperCamelCase__):
if "failed" in expression:
failed += int(expressions[i - 1])
if "passed" in expression:
success += int(expressions[i - 1])
return failed, success, time_spent
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Tuple = {}
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : str = False
for line in failures_short_lines.split("\n"):
if re.search(r"_ \[doctest\]" , UpperCamelCase__):
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = line.split(" ")[2]
elif in_error and not line.split(" ")[0].isdigit():
SCREAMING_SNAKE_CASE : Optional[int] = line
SCREAMING_SNAKE_CASE : Dict = False
return failures
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , a : int , a : Optional[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = title
SCREAMING_SNAKE_CASE : List[Any] = doc_test_results["time_spent"].split("," )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = doc_test_results["success"]
SCREAMING_SNAKE_CASE : Any = doc_test_results["failures"]
SCREAMING_SNAKE_CASE : str = self.n_success + self.n_failures
# Failures and success of the modeling tests
SCREAMING_SNAKE_CASE : Union[str, Any] = doc_test_results
@property
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = [self._time_spent]
SCREAMING_SNAKE_CASE : int = 0
for time in time_spent:
SCREAMING_SNAKE_CASE : Dict = time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__UpperCAmelCase ) == 1:
SCREAMING_SNAKE_CASE : Tuple = [0, 0, time_parts[0]]
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F"{int(__UpperCAmelCase )}h{int(__UpperCAmelCase )}m{int(__UpperCAmelCase )}s"
@property
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def __UpperCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
F" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 40
SCREAMING_SNAKE_CASE : Union[str, Any] = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
SCREAMING_SNAKE_CASE : List[str] = ""
for category, failures in category_failures.items():
if len(__UpperCAmelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += F"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__UpperCAmelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__UpperCAmelCase )
@staticmethod
def __UpperCamelCase ( ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(__UpperCAmelCase )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=__UpperCAmelCase , )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
SCREAMING_SNAKE_CASE : int = F"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
SCREAMING_SNAKE_CASE : Optional[Any] = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=__UpperCAmelCase , )
def __UpperCamelCase ( self : List[str] , a : Any , a : Dict , a : Dict , a : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = ""
for key, value in failures.items():
SCREAMING_SNAKE_CASE : Tuple = value[:200] + " [Truncated]" if len(__UpperCAmelCase ) > 250 else value
failures_text += F"*{key}*\n_{value}_\n\n"
SCREAMING_SNAKE_CASE : Dict = job_name
SCREAMING_SNAKE_CASE : str = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
SCREAMING_SNAKE_CASE : List[Any] = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
SCREAMING_SNAKE_CASE : List[str] = sorted(self.doc_test_results.items() , key=lambda a : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
SCREAMING_SNAKE_CASE : Optional[int] = F"*Num failures* :{len(job_result['failed'] )} \n"
SCREAMING_SNAKE_CASE : Union[str, Any] = job_result["failures"]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_reply_blocks(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , text=__UpperCAmelCase )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F"Results for {job}" , blocks=__UpperCAmelCase , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : List[str] = os.environ["GITHUB_RUN_ID"]
SCREAMING_SNAKE_CASE : int = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
SCREAMING_SNAKE_CASE : Optional[int] = requests.get(UpperCamelCase__).json()
SCREAMING_SNAKE_CASE : List[str] = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
SCREAMING_SNAKE_CASE : Any = math.ceil((result["total_count"] - 100) / 100)
for i in range(UpperCamelCase__):
SCREAMING_SNAKE_CASE : Optional[int] = requests.get(url + f"&page={i + 2}").json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , UpperCamelCase__)
return {}
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Tuple = {}
if os.path.exists(UpperCamelCase__):
SCREAMING_SNAKE_CASE : str = os.listdir(UpperCamelCase__)
for file in files:
try:
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__) , encoding="utf-8") as f:
SCREAMING_SNAKE_CASE : Optional[int] = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(UpperCamelCase__ , UpperCamelCase__)}.") from e
return _artifact
def lowerCamelCase__ ( ):
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[Any] , a : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = name
SCREAMING_SNAKE_CASE : int = []
def __str__( self : Any ) -> List[str]:
"""simple docstring"""
return self.name
def __UpperCamelCase ( self : Dict , a : List[str] ) -> int:
"""simple docstring"""
self.paths.append({"name": self.name, "path": path} )
SCREAMING_SNAKE_CASE : Optional[int] = {}
SCREAMING_SNAKE_CASE : List[str] = filter(os.path.isdir , os.listdir())
for directory in directories:
SCREAMING_SNAKE_CASE : str = directory
if artifact_name not in _available_artifacts:
SCREAMING_SNAKE_CASE : List[Any] = Artifact(UpperCamelCase__)
_available_artifacts[artifact_name].add_path(UpperCamelCase__)
return _available_artifacts
if __name__ == "__main__":
a_ = get_job_links()
a_ = retrieve_available_artifacts()
a_ = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
a_ = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
a_ = github_actions_job_links.get('run_doctests')
a_ = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
a_ = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
a_ , a_ , a_ = handle_test_results(artifact['stats'])
a_ = failed
a_ = success
a_ = time_spent[1:-1] + ', '
a_ = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
a_ = line.replace('FAILED ', '')
a_ = line.split()[0].replace('\n', '')
if "::" in line:
a_ , a_ = line.split('::')
else:
a_ , a_ = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
a_ = docs[file_regex]
doc_test_results[category]["failed"].append(test)
a_ = all_failures[test] if test in all_failures else 'N/A'
a_ = failure
break
a_ = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply() | 76 | """simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 221 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_SCREAMING_SNAKE_CASE : Tuple = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
_SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 358 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __a :
"""simple docstring"""
def __init__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Any=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : int=True , lowercase_ : List[str]=True , lowercase_ : int=True , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=99 , lowercase_ : int=32 , lowercase_ : List[Any]=2 , lowercase_ : Optional[int]=4 , lowercase_ : Dict=37 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : Dict=16 , lowercase_ : Optional[int]=2 , lowercase_ : Optional[int]=0.0_2 , lowercase_ : Dict=3 , lowercase_ : Optional[int]=4 , lowercase_ : Any=None , ):
UpperCamelCase__ : Any =parent
UpperCamelCase__ : Any =13
UpperCamelCase__ : int =7
UpperCamelCase__ : Tuple =True
UpperCamelCase__ : Dict =True
UpperCamelCase__ : int =True
UpperCamelCase__ : Tuple =True
UpperCamelCase__ : Any =99
UpperCamelCase__ : Any =32
UpperCamelCase__ : Union[str, Any] =2
UpperCamelCase__ : List[Any] =4
UpperCamelCase__ : Any =37
UpperCamelCase__ : Union[str, Any] ='''gelu'''
UpperCamelCase__ : Dict =0.1
UpperCamelCase__ : int =0.1
UpperCamelCase__ : Union[str, Any] =512
UpperCamelCase__ : Dict =16
UpperCamelCase__ : List[Any] =2
UpperCamelCase__ : str =0.0_2
UpperCamelCase__ : Optional[Any] =3
UpperCamelCase__ : List[str] =4
UpperCamelCase__ : Optional[int] =None
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ : Any =None
if self.use_input_mask:
UpperCamelCase__ : List[Any] =random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ : List[Any] =None
if self.use_token_type_ids:
UpperCamelCase__ : int =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ : str =None
UpperCamelCase__ : Union[str, Any] =None
UpperCamelCase__ : str =None
if self.use_labels:
UpperCamelCase__ : Optional[int] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ : int =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowercase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Any , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : int ):
UpperCamelCase__ : str =TFRoFormerModel(config=lowercase_ )
UpperCamelCase__ : List[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase__ : Dict =[input_ids, input_mask]
UpperCamelCase__ : Tuple =model(lowercase_ )
UpperCamelCase__ : str =model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : List[Any] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : int ):
UpperCamelCase__ : Optional[Any] =True
UpperCamelCase__ : List[Any] =TFRoFormerForCausalLM(config=lowercase_ )
UpperCamelCase__ : Optional[Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : Any =model(lowercase_ )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _lowerCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
UpperCamelCase__ : str =TFRoFormerForMaskedLM(config=lowercase_ )
UpperCamelCase__ : int ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : Optional[int] =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : int ):
UpperCamelCase__ : Tuple =self.num_labels
UpperCamelCase__ : List[str] =TFRoFormerForSequenceClassification(config=lowercase_ )
UpperCamelCase__ : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : Optional[Any] =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : List[Any] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] ):
UpperCamelCase__ : Tuple =self.num_choices
UpperCamelCase__ : Tuple =TFRoFormerForMultipleChoice(config=lowercase_ )
UpperCamelCase__ : Optional[int] =tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ : int =tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ : List[str] =tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ : int ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCamelCase__ : Tuple =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Dict , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Tuple ):
UpperCamelCase__ : Optional[int] =self.num_labels
UpperCamelCase__ : List[str] =TFRoFormerForTokenClassification(config=lowercase_ )
UpperCamelCase__ : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : int =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : str , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str ):
UpperCamelCase__ : Dict =TFRoFormerForQuestionAnswering(config=lowercase_ )
UpperCamelCase__ : Optional[Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : List[str] =model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : List[str] =self.prepare_config_and_inputs()
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) : Tuple =config_and_inputs
UpperCamelCase__ : Any ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __a ( snake_case__, snake_case__, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def _lowerCAmelCase ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : Tuple , lowercase_ : int ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : List[Any] =TFRoFormerModelTester(self )
UpperCamelCase__ : Any =ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def _lowerCAmelCase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : int ):
UpperCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*lowercase_ )
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase_ )
def _lowerCAmelCase ( self : str ):
UpperCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def _lowerCAmelCase ( self : str ):
UpperCamelCase__ : Optional[Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(lowercase_ )
@require_tf
class __a ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ : List[str] =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
UpperCamelCase__ : List[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ : Any =model(lowercase_ )[0]
# TODO Replace vocab size
UpperCamelCase__ : Union[str, Any] =5_0000
UpperCamelCase__ : Optional[Any] =[1, 6, vocab_size]
self.assertEqual(output.shape , lowercase_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
UpperCamelCase__ : Optional[Any] =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-4 )
@require_tf
class __a ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1e-4
def _lowerCAmelCase ( self : Any ):
UpperCamelCase__ : str =tf.constant([[4, 10]] )
UpperCamelCase__ : Dict =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
UpperCamelCase__ : Any =emba(input_ids.shape )
UpperCamelCase__ : Union[str, Any] =tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(lowercase_ , lowercase_ , atol=self.tolerance )
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ : Dict =tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
UpperCamelCase__ : int =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
UpperCamelCase__ : Optional[int] =emba.weight[:3, :5]
tf.debugging.assert_near(lowercase_ , lowercase_ , atol=self.tolerance )
@require_tf
class __a ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1e-4
def _lowerCAmelCase ( self : str ):
# 2,12,16,64
UpperCamelCase__ : Optional[int] =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
UpperCamelCase__ : Optional[int] =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
UpperCamelCase__ : Optional[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
UpperCamelCase__ : Union[str, Any] =embed_positions([2, 16, 768] )[None, None, :, :]
UpperCamelCase__ , UpperCamelCase__ : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
lowercase_ , lowercase_ , lowercase_ )
UpperCamelCase__ : Optional[int] =tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
UpperCamelCase__ : List[str] =tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , lowercase_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , lowercase_ , atol=self.tolerance )
| 157 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __A , __A=7 , __A=3 , __A=18 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=None , __A=True , __A=[0.5, 0.5, 0.5] , __A=[0.5, 0.5, 0.5] , __A=False , ) -> Any:
a =size if size is not None else {"""height""": 20, """width""": 20}
a =crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
a =parent
a =batch_size
a =num_channels
a =image_size
a =min_resolution
a =max_resolution
a =do_resize
a =size
a =do_center_crop
a =crop_size
a =do_normalize
a =image_mean
a =image_std
a =do_reduce_labels
def SCREAMING_SNAKE_CASE ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def _A ( ):
"""simple docstring"""
a =load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
a =Image.open(dataset[0]['''file'''] )
a =Image.open(dataset[1]['''file'''] )
return image, map
def _A ( ):
"""simple docstring"""
a =load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
a =Image.open(ds[0]['''file'''] )
a =Image.open(ds[1]['''file'''] )
a =Image.open(ds[2]['''file'''] )
a =Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __A ( lowercase_, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = BeitImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =BeitImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''image_std''' ) )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase_ )
a =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=lowerCAmelCase_ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
a =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a =image_processing(lowerCAmelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input
a =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a =image_processing(lowerCAmelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
a =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a =image_processing(lowerCAmelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
a =[]
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
a =image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
a =image_processing(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
a =prepare_semantic_single_inputs()
a =image_processing(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
a =prepare_semantic_batch_inputs()
a =image_processing(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
a =prepare_semantic_single_inputs()
a =image_processing(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
a =True
a =image_processing(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 ) | 81 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Optional[Any] = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 134 | 0 |
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Tuple = 'openai/whisper-base'
_SCREAMING_SNAKE_CASE : Optional[Any] = (
'This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '
'transcribed text.'
)
_SCREAMING_SNAKE_CASE : Any = 'transcriber'
_SCREAMING_SNAKE_CASE : Optional[Any] = WhisperProcessor
_SCREAMING_SNAKE_CASE : int = WhisperForConditionalGeneration
_SCREAMING_SNAKE_CASE : int = ['audio']
_SCREAMING_SNAKE_CASE : List[str] = ['text']
def lowerCAmelCase (self : str , snake_case_ : Optional[Any] ):
return self.pre_processor(_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_features
def lowerCAmelCase (self : List[str] , snake_case_ : Tuple ):
return self.model.generate(inputs=_SCREAMING_SNAKE_CASE )
def lowerCAmelCase (self : Optional[Any] , snake_case_ : Tuple ):
return self.pre_processor.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )[0]
| 368 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowercase__ ='bert-base-cased'
lowercase__ ='google/pegasus-xsum'
lowercase__ =[' Sam ate lunch today.', 'Sams lunch ingredients.']
lowercase__ =['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
lowercase__ ='patrickvonplaten/t5-tiny-random'
lowercase__ ='sshleifer/bart-tiny-random'
lowercase__ ='sshleifer/tiny-mbart'
lowercase__ ='sshleifer/tiny-marian-en-de'
def __UpperCamelCase ( lowerCAmelCase__ : Path , lowerCAmelCase__ : list ):
__a : List[Any] = '''\n'''.join(lowerCAmelCase__ )
Path(lowerCAmelCase__ ).open('''w''' ).writelines(lowerCAmelCase__ )
def __UpperCamelCase ( lowerCAmelCase__ : int ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(lowerCAmelCase__ , f"{split}.source" ) , lowerCAmelCase__ )
_dump_articles(os.path.join(lowerCAmelCase__ , f"{split}.target" ) , lowerCAmelCase__ )
return tmp_dir
class UpperCamelCase__ ( __lowercase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def lowerCAmelCase (self : int , snake_case_ : int ):
__a : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ )
__a : Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__a : Union[str, Any] = max(len(tokenizer.encode(snake_case_ ) ) for a in ARTICLES )
__a : str = max(len(tokenizer.encode(snake_case_ ) ) for a in SUMMARIES )
__a : str = 4
__a : Dict = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__a , __a : Any = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
__a : List[Any] = SeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path='''train''' , max_source_length=snake_case_ , max_target_length=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ , )
__a : Dict = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(snake_case_ , snake_case_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__a : Dict = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def lowerCAmelCase (self : Optional[Any] , snake_case_ : str ):
__a : Union[str, Any] = AutoTokenizer.from_pretrained(snake_case_ )
__a : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__a : Any = max(len(tokenizer.encode(snake_case_ ) ) for a in ARTICLES )
__a : Any = max(len(tokenizer.encode(snake_case_ ) ) for a in SUMMARIES )
__a : Dict = 4
__a : Optional[int] = LegacySeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path='''train''' , max_source_length=2_0 , max_target_length=snake_case_ , )
__a : Optional[Any] = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def lowerCAmelCase (self : List[str] ):
__a : int = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
__a : Any = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__a : Optional[int] = tmp_dir.joinpath('''train.source''' ).open().readlines()
__a : List[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(snake_case_ , snake_case_ , 1_2_8 , snake_case_ )
__a : Optional[Any] = {x.name for x in tmp_dir.iterdir()}
__a : Union[str, Any] = {x.name for x in save_dir.iterdir()}
__a : str = save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(snake_case_ ) < len(snake_case_ )
assert len(snake_case_ ) == 1
assert len(packed_examples[0] ) == sum(len(snake_case_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def lowerCAmelCase (self : Any ):
if not FAIRSEQ_AVAILABLE:
return
__a , __a , __a : Any = self._get_dataset(max_len=6_4 )
__a : int = 6_4
__a : List[str] = ds.make_dynamic_sampler(snake_case_ , required_batch_size_multiple=snake_case_ )
__a : List[str] = [len(snake_case_ ) for x in batch_sampler]
assert len(set(snake_case_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(snake_case_ ) == len(snake_case_ ) # no dropped or added examples
__a : Union[str, Any] = DataLoader(snake_case_ , batch_sampler=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 )
__a : Tuple = []
__a : Union[str, Any] = []
for batch in data_loader:
__a : Any = batch['''input_ids'''].shape
__a : str = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__a : Optional[Any] = np.product(batch['''input_ids'''].shape )
num_src_per_batch.append(snake_case_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(snake_case_ )
assert num_src_per_batch[0] == max(snake_case_ )
if failures:
raise AssertionError(f"too many tokens in {len(snake_case_ )} batches" )
def lowerCAmelCase (self : int ):
__a , __a , __a : Optional[int] = self._get_dataset(max_len=5_1_2 )
__a : Union[str, Any] = 2
__a : str = ds.make_sortish_sampler(snake_case_ , shuffle=snake_case_ )
__a : Tuple = DataLoader(snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 )
__a : Tuple = DataLoader(snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=snake_case_ )
__a : Optional[int] = tokenizer.pad_token_id
def count_pad_tokens(snake_case_ : Union[str, Any] , snake_case_ : List[str]="input_ids" ):
return [batch[k].eq(snake_case_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(snake_case_ , k='''labels''' ) ) < sum(count_pad_tokens(snake_case_ , k='''labels''' ) )
assert sum(count_pad_tokens(snake_case_ ) ) < sum(count_pad_tokens(snake_case_ ) )
assert len(snake_case_ ) == len(snake_case_ )
def lowerCAmelCase (self : int , snake_case_ : int=1_0_0_0 , snake_case_ : Optional[Any]=1_2_8 ):
if os.getenv('''USE_REAL_DATA''' , snake_case_ ):
__a : Optional[int] = '''examples/seq2seq/wmt_en_ro'''
__a : List[Any] = max_len * 2 * 6_4
if not Path(snake_case_ ).joinpath('''train.len''' ).exists():
save_len_file(snake_case_ , snake_case_ )
else:
__a : int = '''examples/seq2seq/test_data/wmt_en_ro'''
__a : List[str] = max_len * 4
save_len_file(snake_case_ , snake_case_ )
__a : str = AutoTokenizer.from_pretrained(snake_case_ )
__a : Optional[int] = SeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path='''train''' , max_source_length=snake_case_ , max_target_length=snake_case_ , n_obs=snake_case_ , )
return ds, max_tokens, tokenizer
def lowerCAmelCase (self : List[str] ):
__a , __a , __a : str = self._get_dataset()
__a : Optional[Any] = set(DistributedSortishSampler(snake_case_ , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=snake_case_ ) )
__a : Tuple = set(DistributedSortishSampler(snake_case_ , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=snake_case_ ) )
assert idsa.intersection(snake_case_ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def lowerCAmelCase (self : str , snake_case_ : Union[str, Any] ):
__a : Union[str, Any] = AutoTokenizer.from_pretrained(snake_case_ , use_fast=snake_case_ )
if tok_name == MBART_TINY:
__a : Any = SeqaSeqDataset(
snake_case_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
__a : Tuple = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__a : Optional[Any] = SeqaSeqDataset(
snake_case_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
__a : List[Any] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(snake_case_ ) == 1 if tok_name == BART_TINY else len(snake_case_ ) == 0
| 90 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Dict = logging.get_logger(__name__)
class __A( a ):
snake_case_ = '''encoder-decoder'''
snake_case_ = True
def __init__( self , **_snake_case ) -> str:
'''simple docstring'''
super().__init__(**_snake_case )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
__a = kwargs.pop('''encoder''' )
__a = encoder_config.pop('''model_type''' )
__a = kwargs.pop('''decoder''' )
__a = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
__a = AutoConfig.for_model(_snake_case , **_snake_case )
__a = AutoConfig.for_model(_snake_case , **_snake_case )
__a = True
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , _snake_case , _snake_case , **_snake_case ) -> PretrainedConfig:
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
__a = True
__a = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = copy.deepcopy(self.__dict__ )
__a = self.encoder.to_dict()
__a = self.decoder.to_dict()
__a = self.__class__.model_type
return output | 6 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
SCREAMING_SNAKE_CASE = [1_44, 1_92, 2_40]
SCREAMING_SNAKE_CASE = [16, 32, 64, 96, 1_28, 1_60, 6_40]
elif "mobilevit_xs" in mobilevit_name:
SCREAMING_SNAKE_CASE = [96, 1_20, 1_44]
SCREAMING_SNAKE_CASE = [16, 32, 48, 64, 80, 96, 3_84]
elif "mobilevit_xxs" in mobilevit_name:
SCREAMING_SNAKE_CASE = [64, 80, 96]
SCREAMING_SNAKE_CASE = [16, 16, 24, 48, 64, 80, 3_20]
SCREAMING_SNAKE_CASE = 0.05
SCREAMING_SNAKE_CASE = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
SCREAMING_SNAKE_CASE = 5_12
SCREAMING_SNAKE_CASE = 16
SCREAMING_SNAKE_CASE = 21
SCREAMING_SNAKE_CASE = """pascal-voc-id2label.json"""
else:
SCREAMING_SNAKE_CASE = 10_00
SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
SCREAMING_SNAKE_CASE = """huggingface/label-files"""
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str:
'''simple docstring'''
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
SCREAMING_SNAKE_CASE = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
SCREAMING_SNAKE_CASE = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
SCREAMING_SNAKE_CASE = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
SCREAMING_SNAKE_CASE = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
SCREAMING_SNAKE_CASE = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
SCREAMING_SNAKE_CASE = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
SCREAMING_SNAKE_CASE = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
SCREAMING_SNAKE_CASE = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
SCREAMING_SNAKE_CASE = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
SCREAMING_SNAKE_CASE = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
SCREAMING_SNAKE_CASE = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if F""".global_rep.{i}.bias""" in name:
SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
SCREAMING_SNAKE_CASE = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
SCREAMING_SNAKE_CASE = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
SCREAMING_SNAKE_CASE = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
SCREAMING_SNAKE_CASE = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
SCREAMING_SNAKE_CASE = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
SCREAMING_SNAKE_CASE = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
SCREAMING_SNAKE_CASE = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
SCREAMING_SNAKE_CASE = """mobilevit.""" + name
return name
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict:
'''simple docstring'''
if base_model:
SCREAMING_SNAKE_CASE = """"""
else:
SCREAMING_SNAKE_CASE = """mobilevit."""
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if key[:8] == "encoder.":
SCREAMING_SNAKE_CASE = key[8:]
if "qkv" in key:
SCREAMING_SNAKE_CASE = key.split(""".""" )
SCREAMING_SNAKE_CASE = int(key_split[0][6:] ) - 1
SCREAMING_SNAKE_CASE = int(key_split[3] )
SCREAMING_SNAKE_CASE = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
SCREAMING_SNAKE_CASE = layer.transformer.layer[transformer_num].attention.attention.all_head_size
SCREAMING_SNAKE_CASE = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
SCREAMING_SNAKE_CASE = val[:dim, :]
SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE = val[:dim]
SCREAMING_SNAKE_CASE = val[dim : dim * 2]
SCREAMING_SNAKE_CASE = val[-dim:]
else:
SCREAMING_SNAKE_CASE = val
return orig_state_dict
def __lowercase ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_mobilevit_config(_SCREAMING_SNAKE_CASE )
# load original state_dict
SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
SCREAMING_SNAKE_CASE = MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE ).eval()
else:
SCREAMING_SNAKE_CASE = MobileViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
SCREAMING_SNAKE_CASE = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileViTImageProcessor
SCREAMING_SNAKE_CASE = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
SCREAMING_SNAKE_CASE = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
SCREAMING_SNAKE_CASE = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
SCREAMING_SNAKE_CASE = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
else:
assert logits.shape == (1, 10_00)
if mobilevit_name == "mobilevit_s":
SCREAMING_SNAKE_CASE = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
SCREAMING_SNAKE_CASE = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
SCREAMING_SNAKE_CASE = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
SCREAMING_SNAKE_CASE = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
SCREAMING_SNAKE_CASE = model_mapping[mobilevit_name]
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" )
model.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 296 | 0 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , lowercase , )
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = RobertaConfig
__snake_case = '''roberta'''
def __init__( self : List[Any] , __UpperCAmelCase : Any ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(__UpperCAmelCase )
a = RobertaEmbeddings(__UpperCAmelCase )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , lowercase , )
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = RobertaConfig
__snake_case = '''roberta'''
def __init__( self : str , __UpperCAmelCase : str ) ->Any:
"""simple docstring"""
super().__init__(__UpperCAmelCase )
a = config.num_labels
a = config.num_hidden_layers
a = DeeRobertaModel(__UpperCAmelCase )
a = nn.Dropout(config.hidden_dropout_prob )
a = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Any=None , __UpperCAmelCase : str=None , __UpperCAmelCase : int=None , __UpperCAmelCase : int=None , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Optional[int]=-1 , __UpperCAmelCase : Optional[Any]=False , ) ->int:
"""simple docstring"""
a = self.num_layers
try:
a = self.roberta(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , position_ids=__UpperCAmelCase , head_mask=__UpperCAmelCase , inputs_embeds=__UpperCAmelCase , )
a = outputs[1]
a = self.dropout(__UpperCAmelCase )
a = self.classifier(__UpperCAmelCase )
a = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
a = e.message
a = e.exit_layer
a = outputs[0]
if not self.training:
a = entropy(__UpperCAmelCase )
a = []
a = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
a = MSELoss()
a = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
a = CrossEntropyLoss()
a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
a = []
for highway_exit in outputs[-1]:
a = highway_exit[0]
if not self.training:
highway_logits_all.append(__UpperCAmelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
a = MSELoss()
a = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
a = CrossEntropyLoss()
a = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__UpperCAmelCase )
if train_highway:
a = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
a = (loss,) + outputs
if not self.training:
a = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
a = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 359 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def _a ( a :Union[str, Any] , a :List[Any] ) -> List[Any]:
a = checkpoint
a = {}
a = vae_state_dict['''encoder.conv_in.weight''']
a = vae_state_dict['''encoder.conv_in.bias''']
a = vae_state_dict['''encoder.conv_out.weight''']
a = vae_state_dict['''encoder.conv_out.bias''']
a = vae_state_dict['''encoder.norm_out.weight''']
a = vae_state_dict['''encoder.norm_out.bias''']
a = vae_state_dict['''decoder.conv_in.weight''']
a = vae_state_dict['''decoder.conv_in.bias''']
a = vae_state_dict['''decoder.conv_out.weight''']
a = vae_state_dict['''decoder.conv_out.bias''']
a = vae_state_dict['''decoder.norm_out.weight''']
a = vae_state_dict['''decoder.norm_out.bias''']
a = vae_state_dict['''quant_conv.weight''']
a = vae_state_dict['''quant_conv.bias''']
a = vae_state_dict['''post_quant_conv.weight''']
a = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
a = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
a = {
layer_id: [key for key in vae_state_dict if F"""down.{layer_id}""" in key] for layer_id in range(a )
}
# Retrieves the keys for the decoder up blocks only
a = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
a = {
layer_id: [key for key in vae_state_dict if F"""up.{layer_id}""" in key] for layer_id in range(a )
}
for i in range(a ):
a = [key for key in down_blocks[i] if F"""down.{i}""" in key and F"""down.{i}.downsample""" not in key]
if F"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
a = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.weight""" )
a = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.bias""" )
a = renew_vae_resnet_paths(a )
a = {'''old''': F"""down.{i}.block""", '''new''': F"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
a = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
a = 2
for i in range(1 , num_mid_res_blocks + 1 ):
a = [key for key in mid_resnets if F"""encoder.mid.block_{i}""" in key]
a = renew_vae_resnet_paths(a )
a = {'''old''': F"""mid.block_{i}""", '''new''': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
a = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
a = renew_vae_attention_paths(a )
a = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
conv_attn_to_linear(a )
for i in range(a ):
a = num_up_blocks - 1 - i
a = [
key for key in up_blocks[block_id] if F"""up.{block_id}""" in key and F"""up.{block_id}.upsample""" not in key
]
if F"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
a = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.weight"""
]
a = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.bias"""
]
a = renew_vae_resnet_paths(a )
a = {'''old''': F"""up.{block_id}.block""", '''new''': F"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
a = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
a = 2
for i in range(1 , num_mid_res_blocks + 1 ):
a = [key for key in mid_resnets if F"""decoder.mid.block_{i}""" in key]
a = renew_vae_resnet_paths(a )
a = {'''old''': F"""mid.block_{i}""", '''new''': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
a = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
a = renew_vae_attention_paths(a )
a = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
conv_attn_to_linear(a )
return new_checkpoint
def _a ( a :str , a :str , ) -> List[str]:
# Only support V1
a = requests.get(
''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
a = io.BytesIO(r.content )
a = OmegaConf.load(a )
a = 512
a = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
a = {}
with safe_open(a , framework='''pt''' , device='''cpu''' ) as f:
for key in f.keys():
a = f.get_tensor(a )
else:
a = torch.load(a , map_location=a )['''state_dict''']
# Convert the VAE model.
a = create_vae_diffusers_config(a , image_size=a )
a = custom_convert_ldm_vae_checkpoint(a , a )
a = AutoencoderKL(**a )
vae.load_state_dict(a )
vae.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
UpperCAmelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 26 | 0 |
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__a = logging.get_logger(__name__)
__a = r"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
@add_start_docstrings(snake_case )
def __call__( self: int , snake_case: torch.LongTensor , snake_case: torch.FloatTensor , **snake_case: Optional[Any] ) -> bool:
raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self: Any , snake_case: int , snake_case: Optional[int] = None ) -> List[str]:
snake_case_ :Dict = max_length
snake_case_ :Union[str, Any] = max_position_embeddings
@add_start_docstrings(snake_case )
def __call__( self: Dict , snake_case: torch.LongTensor , snake_case: torch.FloatTensor , **snake_case: str ) -> bool:
snake_case_ :Dict = input_ids.shape[-1]
snake_case_ :Union[str, Any] = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"""This is a friendly reminder - the current text generation call will exceed the model's predefined """
f"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"""exceptions, performance degradation, or nothing at all.""" )
return is_done
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self: Union[str, Any] , snake_case: int , snake_case: int ) -> Dict:
warnings.warn(
"""The class `MaxNewTokensCriteria` is deprecated. """
f"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
"""with `max_length = start_length + max_new_tokens` instead.""" , snake_case , )
snake_case_ :Union[str, Any] = start_length
snake_case_ :int = max_new_tokens
snake_case_ :int = start_length + max_new_tokens
@add_start_docstrings(snake_case )
def __call__( self: Optional[int] , snake_case: torch.LongTensor , snake_case: torch.FloatTensor , **snake_case: str ) -> bool:
return input_ids.shape[-1] >= self.max_length
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self: Optional[int] , snake_case: float , snake_case: Optional[float] = None ) -> Union[str, Any]:
snake_case_ :Tuple = max_time
snake_case_ :List[Any] = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(snake_case )
def __call__( self: Tuple , snake_case: torch.LongTensor , snake_case: torch.FloatTensor , **snake_case: Union[str, Any] ) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
@add_start_docstrings(snake_case )
def __call__( self: str , snake_case: torch.LongTensor , snake_case: torch.FloatTensor , **snake_case: int ) -> bool:
return any(criteria(snake_case , snake_case ) for criteria in self )
@property
def lowerCAmelCase_ ( self: Tuple ) -> Optional[int]:
for stopping_criterium in self:
if isinstance(snake_case , snake_case ):
return stopping_criterium.max_length
elif isinstance(snake_case , snake_case ):
return stopping_criterium.max_length
return None
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[str] = stopping_criteria.max_length
snake_case_ :List[str] = deepcopy(_lowercase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""", _lowercase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowercase ) )
return new_stopping_criteria
| 66 |
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = DebertaVaTokenizer
_snake_case = DebertaVaTokenizerFast
_snake_case = True
_snake_case = True
def A__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self , snake_case_ ) -> List[Any]:
__lowerCAmelCase = """this is a test"""
__lowerCAmelCase = """this is a test"""
return input_text, output_text
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = """<pad>"""
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(snake_case_ ) , 30_001 )
def A__ ( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> int:
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> Dict:
pass
def A__ ( self ) -> List[str]:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Dict:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Tuple:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> int:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = """This is a test"""
__lowerCAmelCase = [13, 1, 4_398, 25, 21, 1_289]
__lowerCAmelCase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
__lowerCAmelCase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ )
__lowerCAmelCase = tokenizer.encode("""sequence builders""" )
__lowerCAmelCase = tokenizer.encode("""multi-sequence build""" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case_ , )
@slow
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 301 | 0 |
'''simple docstring'''
import numpy as np
def lowerCamelCase__ ( A : np.array ):
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowercase : List[str] = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def lowerCamelCase__ ( A : str , A : str , A : List[Any]=None ):
'''simple docstring'''
if rng is None:
UpperCAmelCase = random.Random()
UpperCAmelCase = 1
for dim in shape:
total_dims *= dim
UpperCAmelCase = []
for _ in range(A ):
values.append(rng.randint(0 , vocab_size - 1 ) )
UpperCAmelCase = np.array(A , dtype=jnp.intaa ).reshape(A )
return output
def lowerCamelCase__ ( A : int , A : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase = ids_tensor(A , vocab_size=2 , rng=A )
# make sure that at least one token is attended to for each batch
UpperCAmelCase = 1
return attn_mask
@require_flax
class UpperCamelCase__:
__magic_name__ : Optional[int] = None
__magic_name__ : Optional[Any] = ()
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
UpperCAmelCase = 2
UpperCAmelCase = inputs['''input_ids'''].shape[-1] // 2
UpperCAmelCase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
UpperCAmelCase = jnp.ones_like(lowerCAmelCase )
UpperCAmelCase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
UpperCAmelCase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
UpperCAmelCase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 0
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase = getattr(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = pt_model_class(lowerCAmelCase ).eval()
UpperCAmelCase = load_flax_weights_in_pytorch_model(lowerCAmelCase , flax_model.params )
UpperCAmelCase = flax_model.generate(lowerCAmelCase ).sequences
UpperCAmelCase = pt_model.generate(torch.tensor(lowerCAmelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCAmelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = True
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : str )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : List[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 2
UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def a__( self : Tuple )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = True
UpperCAmelCase = max_length
UpperCAmelCase = 0.8
UpperCAmelCase = 10
UpperCAmelCase = 0.3
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = max_length
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = max_length
UpperCAmelCase = 2
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Union[str, Any] )-> Any:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = False
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = True
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = 2
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
UpperCAmelCase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase = '''Hello world'''
UpperCAmelCase = tokenizer(lowerCAmelCase , return_tensors='''np''' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowerCAmelCase , '''do_samples''' ):
model.generate(lowerCAmelCase , do_samples=lowerCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowerCAmelCase , '''foo''' ):
UpperCAmelCase = {'''foo''': '''bar'''}
model.generate(lowerCAmelCase , **lowerCAmelCase )
| 91 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : List[Any]):
a : str = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
a : List[str] = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(__UpperCAmelCase) , __UpperCAmelCase)
def __snake_case ( self : List[Any]):
a : Dict = np.random.randn(3 , 4)
self.assertTrue(np.allclose(transpose(__UpperCAmelCase) , x.transpose()))
a : str = np.random.randn(3 , 4 , 5)
self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0)) , x.transpose((1, 2, 0))))
@require_torch
def __snake_case ( self : Tuple):
a : Union[str, Any] = np.random.randn(3 , 4)
a : Union[str, Any] = torch.tensor(__UpperCAmelCase)
self.assertTrue(np.allclose(transpose(__UpperCAmelCase) , transpose(__UpperCAmelCase).numpy()))
a : Tuple = np.random.randn(3 , 4 , 5)
a : Optional[int] = torch.tensor(__UpperCAmelCase)
self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0)) , transpose(__UpperCAmelCase , axes=(1, 2, 0)).numpy()))
@require_tf
def __snake_case ( self : List[str]):
a : int = np.random.randn(3 , 4)
a : Optional[int] = tf.constant(__UpperCAmelCase)
self.assertTrue(np.allclose(transpose(__UpperCAmelCase) , transpose(__UpperCAmelCase).numpy()))
a : Optional[Any] = np.random.randn(3 , 4 , 5)
a : List[str] = tf.constant(__UpperCAmelCase)
self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0)) , transpose(__UpperCAmelCase , axes=(1, 2, 0)).numpy()))
@require_flax
def __snake_case ( self : str):
a : Union[str, Any] = np.random.randn(3 , 4)
a : Dict = jnp.array(__UpperCAmelCase)
self.assertTrue(np.allclose(transpose(__UpperCAmelCase) , np.asarray(transpose(__UpperCAmelCase))))
a : str = np.random.randn(3 , 4 , 5)
a : List[str] = jnp.array(__UpperCAmelCase)
self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0)) , np.asarray(transpose(__UpperCAmelCase , axes=(1, 2, 0)))))
def __snake_case ( self : Optional[int]):
a : Union[str, Any] = np.random.randn(3 , 4)
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3)) , np.reshape(__UpperCAmelCase , (4, 3))))
a : Dict = np.random.randn(3 , 4 , 5)
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5)) , np.reshape(__UpperCAmelCase , (12, 5))))
@require_torch
def __snake_case ( self : Tuple):
a : List[Any] = np.random.randn(3 , 4)
a : Tuple = torch.tensor(__UpperCAmelCase)
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3)) , reshape(__UpperCAmelCase , (4, 3)).numpy()))
a : List[str] = np.random.randn(3 , 4 , 5)
a : Union[str, Any] = torch.tensor(__UpperCAmelCase)
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5)) , reshape(__UpperCAmelCase , (12, 5)).numpy()))
@require_tf
def __snake_case ( self : Optional[int]):
a : List[Any] = np.random.randn(3 , 4)
a : Dict = tf.constant(__UpperCAmelCase)
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3)) , reshape(__UpperCAmelCase , (4, 3)).numpy()))
a : Union[str, Any] = np.random.randn(3 , 4 , 5)
a : Union[str, Any] = tf.constant(__UpperCAmelCase)
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5)) , reshape(__UpperCAmelCase , (12, 5)).numpy()))
@require_flax
def __snake_case ( self : Union[str, Any]):
a : Optional[Any] = np.random.randn(3 , 4)
a : Any = jnp.array(__UpperCAmelCase)
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3)) , np.asarray(reshape(__UpperCAmelCase , (4, 3)))))
a : List[str] = np.random.randn(3 , 4 , 5)
a : Union[str, Any] = jnp.array(__UpperCAmelCase)
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5)) , np.asarray(reshape(__UpperCAmelCase , (12, 5)))))
def __snake_case ( self : Optional[int]):
a : Tuple = np.random.randn(1 , 3 , 4)
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase) , np.squeeze(__UpperCAmelCase)))
a : List[str] = np.random.randn(1 , 4 , 1 , 5)
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2) , np.squeeze(__UpperCAmelCase , axis=2)))
@require_torch
def __snake_case ( self : Tuple):
a : Dict = np.random.randn(1 , 3 , 4)
a : Any = torch.tensor(__UpperCAmelCase)
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase) , squeeze(__UpperCAmelCase).numpy()))
a : Any = np.random.randn(1 , 4 , 1 , 5)
a : str = torch.tensor(__UpperCAmelCase)
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2) , squeeze(__UpperCAmelCase , axis=2).numpy()))
@require_tf
def __snake_case ( self : Tuple):
a : int = np.random.randn(1 , 3 , 4)
a : List[str] = tf.constant(__UpperCAmelCase)
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase) , squeeze(__UpperCAmelCase).numpy()))
a : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5)
a : int = tf.constant(__UpperCAmelCase)
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2) , squeeze(__UpperCAmelCase , axis=2).numpy()))
@require_flax
def __snake_case ( self : Dict):
a : Tuple = np.random.randn(1 , 3 , 4)
a : List[Any] = jnp.array(__UpperCAmelCase)
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase) , np.asarray(squeeze(__UpperCAmelCase))))
a : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5)
a : Tuple = jnp.array(__UpperCAmelCase)
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2) , np.asarray(squeeze(__UpperCAmelCase , axis=2))))
def __snake_case ( self : List[Any]):
a : Optional[int] = np.random.randn(3 , 4)
self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1) , np.expand_dims(__UpperCAmelCase , axis=1)))
@require_torch
def __snake_case ( self : Optional[Any]):
a : Any = np.random.randn(3 , 4)
a : Optional[Any] = torch.tensor(__UpperCAmelCase)
self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1) , expand_dims(__UpperCAmelCase , axis=1).numpy()))
@require_tf
def __snake_case ( self : str):
a : int = np.random.randn(3 , 4)
a : int = tf.constant(__UpperCAmelCase)
self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1) , expand_dims(__UpperCAmelCase , axis=1).numpy()))
@require_flax
def __snake_case ( self : List[Any]):
a : Optional[int] = np.random.randn(3 , 4)
a : Any = jnp.array(__UpperCAmelCase)
self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1) , np.asarray(expand_dims(__UpperCAmelCase , axis=1))))
| 40 |
'''simple docstring'''
_snake_case = 8.3_1_4_4_5_9_8
def _A ( snake_case , snake_case ) -> float:
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_snake_case = 300
_snake_case = 28
_snake_case = rms_speed_of_molecule(temperature, molar_mass)
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 250 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__UpperCamelCase : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : int ,*lowercase_ : str ,**lowercase_ : Optional[int] ):
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' ,lowercase_ ,)
super().__init__(*lowercase_ ,**lowercase_ )
| 74 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCamelCase : int = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __SCREAMING_SNAKE_CASE ( A_ ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
lowerCAmelCase__ : str = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(A_ , id=A_ )
| 74 | 1 |
"""simple docstring"""
lowerCamelCase_ : int = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Union[str, Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
} | 81 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ : Any = logging.get_logger(__name__)
lowerCamelCase_ : Optional[Any] = """▁"""
lowerCamelCase_ : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCamelCase_ : Any = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowerCamelCase_ : Tuple = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , __A , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A = None , **__A , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
a ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
a =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
a =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
a ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a =1
a =len(self.sp_model ) + self.fairseq_offset
a ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Any:
a =self.__dict__.copy()
a =None
a =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __A ) -> List[Any]:
a =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a ={}
a =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a =[self.cls_token_id]
a =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]:
a =[self.sep_token_id]
a =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a ={self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self , __A ) -> List[str]:
return self.sp_model.encode(__A , out_type=__A )
def SCREAMING_SNAKE_CASE ( self , __A ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a =self.sp_model.PieceToId(__A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self , __A ) -> List[str]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[Any]:
a =''''''.join(__A ).replace(__A , ''' ''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
a =os.path.join(
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , '''wb''' ) as fi:
a =self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,) | 81 | 1 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def _snake_case ( _snake_case : Optional[Any] ):
lowerCAmelCase : Any = torch.exp(_snake_case )
lowerCAmelCase : Union[str, Any] = torch.sum(_snake_case , dim=1 ) # sum of exp(x_i)
lowerCAmelCase : Dict = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_snake_case ) - B / A
class snake_case_( nn.Module ):
def __init__( self : Union[str, Any] , UpperCamelCase_ : List[Any] ):
super().__init__()
lowerCAmelCase : List[Any] = config.output_attentions
lowerCAmelCase : Optional[int] = config.output_hidden_states
lowerCAmelCase : Optional[Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
lowerCAmelCase : int = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
lowerCAmelCase : int = [-1 for _ in range(config.num_hidden_layers )]
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple ):
if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
lowerCAmelCase : Optional[Any] = x
else:
lowerCAmelCase : Dict = x
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase : List[Any] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Any=None , ):
lowerCAmelCase : Tuple = ()
lowerCAmelCase : Dict = ()
lowerCAmelCase : Optional[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
lowerCAmelCase : str = all_hidden_states + (hidden_states,)
lowerCAmelCase : Dict = layer_module(
UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[int] = layer_outputs[0]
if self.output_attentions:
lowerCAmelCase : List[Any] = all_attentions + (layer_outputs[1],)
lowerCAmelCase : int = (hidden_states,)
if self.output_hidden_states:
lowerCAmelCase : List[str] = current_outputs + (all_hidden_states,)
if self.output_attentions:
lowerCAmelCase : Optional[int] = current_outputs + (all_attentions,)
lowerCAmelCase : Union[str, Any] = self.highway[i](UpperCamelCase_ )
# logits, pooled_output
if not self.training:
lowerCAmelCase : Tuple = highway_exit[0]
lowerCAmelCase : Optional[Any] = entropy(UpperCamelCase_ )
lowerCAmelCase : List[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
lowerCAmelCase : int = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
lowerCAmelCase : List[str] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCamelCase_ , i + 1 )
else:
lowerCAmelCase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
lowerCAmelCase : int = all_hidden_states + (hidden_states,)
lowerCAmelCase : Optional[Any] = (hidden_states,)
if self.output_hidden_states:
lowerCAmelCase : Optional[int] = outputs + (all_hidden_states,)
if self.output_attentions:
lowerCAmelCase : List[Any] = outputs + (all_attentions,)
lowerCAmelCase : int = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , a__ , )
class snake_case_( a__ ):
def __init__( self : Dict , UpperCamelCase_ : List[str] ):
super().__init__(UpperCamelCase_ )
lowerCAmelCase : List[Any] = config
lowerCAmelCase : Union[str, Any] = BertEmbeddings(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = DeeBertEncoder(UpperCamelCase_ )
lowerCAmelCase : Any = BertPooler(UpperCamelCase_ )
self.init_weights()
def lowerCamelCase__ ( self : List[Any] ):
self.encoder.init_highway_pooler(self.pooler )
def lowerCamelCase__ ( self : str ):
return self.embeddings.word_embeddings
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : int ):
lowerCAmelCase : str = value
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Any ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ )
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Dict=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Tuple=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
lowerCAmelCase : List[Any] = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase : int = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
lowerCAmelCase : Tuple = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if encoder_attention_mask is None:
lowerCAmelCase : Optional[Any] = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
lowerCAmelCase : Dict = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
lowerCAmelCase : str = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
lowerCAmelCase : List[str] = encoder_attention_mask[:, None, None, :]
lowerCAmelCase : List[str] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
lowerCAmelCase : Any = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase : Optional[Any] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
lowerCAmelCase : Optional[Any] = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
lowerCAmelCase : Any = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
lowerCAmelCase : Dict = encoder_outputs[0]
lowerCAmelCase : List[Any] = self.pooler(UpperCamelCase_ )
lowerCAmelCase : Tuple = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class snake_case_( a__ ):
def __init__( self : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : str ):
lowerCAmelCase : Any = message
lowerCAmelCase : Optional[int] = exit_layer # start from 1!
class snake_case_( nn.Module ):
def __init__( self : Union[str, Any] , UpperCamelCase_ : Tuple ):
super().__init__()
lowerCAmelCase : Union[str, Any] = BertPooler(UpperCamelCase_ )
lowerCAmelCase : str = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase : Optional[int] = nn.Linear(config.hidden_size , config.num_labels )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[str] ):
# Pooler
lowerCAmelCase : str = encoder_outputs[0]
lowerCAmelCase : List[str] = self.pooler(UpperCamelCase_ )
# "return" pooler_output
# BertModel
lowerCAmelCase : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
lowerCAmelCase : List[Any] = bmodel_output[1]
lowerCAmelCase : Optional[Any] = self.dropout(UpperCamelCase_ )
lowerCAmelCase : Dict = self.classifier(UpperCamelCase_ )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , a__ , )
class snake_case_( a__ ):
def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] ):
super().__init__(UpperCamelCase_ )
lowerCAmelCase : Tuple = config.num_labels
lowerCAmelCase : str = config.num_hidden_layers
lowerCAmelCase : List[Any] = DeeBertModel(UpperCamelCase_ )
lowerCAmelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase : str = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : str=None , UpperCamelCase_ : int=-1 , UpperCamelCase_ : str=False , ):
lowerCAmelCase : Optional[Any] = self.num_layers
try:
lowerCAmelCase : List[Any] = self.bert(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
lowerCAmelCase : str = outputs[1]
lowerCAmelCase : List[Any] = self.dropout(UpperCamelCase_ )
lowerCAmelCase : Dict = self.classifier(UpperCamelCase_ )
lowerCAmelCase : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowerCAmelCase : Any = e.message
lowerCAmelCase : List[Any] = e.exit_layer
lowerCAmelCase : Tuple = outputs[0]
if not self.training:
lowerCAmelCase : List[Any] = entropy(UpperCamelCase_ )
lowerCAmelCase : Any = []
lowerCAmelCase : int = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase : Tuple = MSELoss()
lowerCAmelCase : Any = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase : Tuple = CrossEntropyLoss()
lowerCAmelCase : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
lowerCAmelCase : Optional[int] = []
for highway_exit in outputs[-1]:
lowerCAmelCase : Optional[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase : Dict = MSELoss()
lowerCAmelCase : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase : str = CrossEntropyLoss()
lowerCAmelCase : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase_ )
if train_highway:
lowerCAmelCase : Any = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowerCAmelCase : Any = (loss,) + outputs
if not self.training:
lowerCAmelCase : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowerCAmelCase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 314 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case_( a__ ):
__UpperCamelCase = (DDPMScheduler,)
def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase_ : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCamelCase_ )
return config
def lowerCamelCase__ ( self : Optional[int] ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
self.check_over_configs(thresholding=UpperCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , )
def lowerCamelCase__ ( self : Tuple ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : str = self.scheduler_classes[0]
lowerCAmelCase : Dict = self.get_scheduler_config()
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : List[Any] = self.scheduler_classes[0]
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ )
lowerCAmelCase : List[str] = self.dummy_model()
lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter
lowerCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase_ ) ):
# 1. predict noise residual
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase : Optional[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase : Union[str, Any] = pred_prev_sample
lowerCAmelCase : str = torch.sum(torch.abs(UpperCamelCase_ ) )
lowerCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Dict = len(UpperCamelCase_ )
lowerCAmelCase : Any = self.dummy_model()
lowerCAmelCase : Any = self.dummy_sample_deter
lowerCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase_ ) ):
# 1. predict noise residual
lowerCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase : List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase : List[Any] = pred_prev_sample
lowerCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) )
lowerCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Dict = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config()
lowerCAmelCase : int = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : List[Any] = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
lowerCAmelCase : Dict = scheduler.timesteps
for i, timestep in enumerate(UpperCamelCase_ ):
if i == len(UpperCamelCase_ ) - 1:
lowerCAmelCase : List[Any] = -1
else:
lowerCAmelCase : Union[str, Any] = timesteps[i + 1]
lowerCAmelCase : Any = scheduler.previous_timestep(UpperCamelCase_ )
lowerCAmelCase : Dict = prev_t.item()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : int = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Any = self.scheduler_classes[0]
lowerCAmelCase : Optional[int] = self.get_scheduler_config()
lowerCAmelCase : str = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : List[str] = [1_0_0, 8_7, 5_0, 1, 0]
lowerCAmelCase : int = len(UpperCamelCase_ )
with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : List[Any] = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config()
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCamelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
| 314 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCamelCase_ = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = '''albert'''
def __init__( self : List[Any] , __UpperCAmelCase : Union[str, Any]=30000 , __UpperCAmelCase : List[str]=128 , __UpperCAmelCase : List[Any]=4096 , __UpperCAmelCase : Optional[Any]=12 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : List[str]=64 , __UpperCAmelCase : str=16384 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : int="gelu_new" , __UpperCAmelCase : Dict=0 , __UpperCAmelCase : Tuple=0 , __UpperCAmelCase : Tuple=512 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : Any=0.02 , __UpperCAmelCase : Optional[Any]=1E-12 , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Optional[Any]="absolute" , __UpperCAmelCase : Optional[int]=0 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Dict=3 , **__UpperCAmelCase : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
_A = vocab_size
_A = embedding_size
_A = hidden_size
_A = num_hidden_layers
_A = num_hidden_groups
_A = num_attention_heads
_A = inner_group_num
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = classifier_dropout_prob
_A = position_embedding_type
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
if self.task == "multiple-choice":
_A = {0: "batch", 1: "choice", 2: "sequence"}
else:
_A = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 79 |
from math import ceil
def __UpperCAmelCase ( __a : int = 1_001 ) -> int:
"""simple docstring"""
_a : List[Any] = 1
for i in range(1 ,int(ceil(n / 2.0 ) ) ):
_a : Optional[Any] = 2 * i + 1
_a : Optional[int] = 2 * i
_a : str = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
a__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 235 | 0 |
import copy
import re
class snake_case_ :
UpperCAmelCase__ : Union[str, Any] = 'hp'
UpperCAmelCase__ : Any = {}
UpperCAmelCase__ : List[str] = None
@classmethod
def lowerCamelCase__( cls :Dict ,__snake_case :Dict ,__snake_case :Tuple ) -> Optional[int]:
a__ = prefix
a__ = defaults
cls.build_naming_info()
@staticmethod
def lowerCamelCase__( __snake_case :List[Any] ,__snake_case :Union[str, Any] ) -> int:
if len(__lowercase ) == 0:
return ""
a__ = None
if any(char.isdigit() for char in word ):
raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 ,len(__lowercase ) + 1 ):
a__ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
a__ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__snake_case :int ):
a__ = ''
while integer != 0:
a__ = chr(ord('A' ) + integer % 10 ) + s
integer //= 10
return s
a__ = 0
while True:
a__ = word + '#' + int_to_alphabetic(__lowercase )
if sword in info["reverse_short_word"]:
continue
else:
a__ = sword
break
a__ = short_word
a__ = word
return short_word
@staticmethod
def lowerCamelCase__( __snake_case :List[str] ,__snake_case :str ) -> str:
a__ = param_name.split('_' )
a__ = [TrialShortNamer.shortname_for_word(__lowercase ,__lowercase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
a__ = ['', '_']
for separator in separators:
a__ = separator.join(__lowercase )
if shortname not in info["reverse_short_param"]:
a__ = shortname
a__ = param_name
return shortname
return param_name
@staticmethod
def lowerCamelCase__( __snake_case :List[str] ,__snake_case :List[Any] ) -> Union[str, Any]:
a__ = TrialShortNamer.shortname_for_key(__lowercase ,__lowercase )
a__ = short_name
a__ = param_name
@classmethod
def lowerCamelCase__( cls :Tuple ) -> Tuple:
if cls.NAMING_INFO is not None:
return
a__ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
a__ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__lowercase ,__lowercase )
a__ = info
@classmethod
def lowerCamelCase__( cls :Optional[int] ,__snake_case :Optional[Any] ) -> str:
cls.build_naming_info()
assert cls.PREFIX is not None
a__ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
a__ = cls.NAMING_INFO['short_param'][k]
if isinstance(__lowercase ,__lowercase ):
a__ = 1 if v else 0
a__ = '' if isinstance(__lowercase ,(int, float) ) else '-'
a__ = F'{key}{sep}{v}'
name.append(__lowercase )
return "_".join(__lowercase )
@classmethod
def lowerCamelCase__( cls :int ,__snake_case :Any ) -> List[Any]:
a__ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
a__ = []
else:
a__ = repr.split('_' )
a__ = {}
for value in values:
if "-" in value:
a__ , a__ = value.split('-' )
else:
a__ = re.sub('[0-9.]' ,'' ,__lowercase )
a__ = float(re.sub('[^0-9.]' ,'' ,__lowercase ) )
a__ = cls.NAMING_INFO['reverse_short_param'][p_k]
a__ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
a__ = cls.DEFAULTS[k]
return parameters
| 359 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 109 | 0 |
def __lowercase ( __lowerCAmelCase : str ):
return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def __lowercase ( __lowerCAmelCase : str ):
a__ = credit_card_number
a__ = 0
a__ = len(__lowerCAmelCase ) - 2
for i in range(__lowerCAmelCase , -1 , -2 ):
# double the value of every second digit
a__ = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 1_0
digit += 1
a__ = cc_number[:i] + str(__lowerCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(__lowerCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 1_0 == 0
def __lowercase ( __lowerCAmelCase : str ):
a__ = F'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(F'{error_message} it has nonnumerical characters.' )
return False
if not 1_3 <= len(__lowerCAmelCase ) <= 1_6:
print(F'{error_message} of its length.' )
return False
if not validate_initial_digits(__lowerCAmelCase ):
print(F'{error_message} of its first two digits.' )
return False
if not luhn_validation(__lowerCAmelCase ):
print(F'{error_message} it fails the Luhn check.' )
return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
| 240 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
snake_case : Dict = logging.get_logger(__name__)
snake_case : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case : List[Any] = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
snake_case : int = {
'''distilbert-base-uncased''': 5_12,
'''distilbert-base-uncased-distilled-squad''': 5_12,
'''distilbert-base-cased''': 5_12,
'''distilbert-base-cased-distilled-squad''': 5_12,
'''distilbert-base-german-cased''': 5_12,
'''distilbert-base-multilingual-cased''': 5_12,
}
snake_case : Union[str, Any] = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Any = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Tuple = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ : Optional[Any] = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ : Optional[int] = DistilBertTokenizer
def __init__( self :Dict ,__snake_case :Dict=None ,__snake_case :Optional[Any]=None ,__snake_case :Optional[Any]=True ,__snake_case :List[Any]="[UNK]" ,__snake_case :str="[SEP]" ,__snake_case :List[Any]="[PAD]" ,__snake_case :Tuple="[CLS]" ,__snake_case :Optional[int]="[MASK]" ,__snake_case :Dict=True ,__snake_case :Dict=None ,**__snake_case :List[Any] ,) -> Optional[int]:
super().__init__(
__snake_case ,tokenizer_file=__snake_case ,do_lower_case=__snake_case ,unk_token=__snake_case ,sep_token=__snake_case ,pad_token=__snake_case ,cls_token=__snake_case ,mask_token=__snake_case ,tokenize_chinese_chars=__snake_case ,strip_accents=__snake_case ,**__snake_case ,)
a__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,__snake_case ) != do_lower_case
or normalizer_state.get('strip_accents' ,__snake_case ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,__snake_case ) != tokenize_chinese_chars
):
a__ = getattr(__snake_case ,normalizer_state.pop('type' ) )
a__ = do_lower_case
a__ = strip_accents
a__ = tokenize_chinese_chars
a__ = normalizer_class(**__snake_case )
a__ = do_lower_case
def lowerCamelCase__( self :Any ,__snake_case :List[str] ,__snake_case :int=None ) -> Dict:
a__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase__( self :List[str] ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ) -> List[int]:
a__ = [self.sep_token_id]
a__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :str ,__snake_case :Optional[str] = None ) -> Tuple[str]:
a__ = self._tokenizer.model.save(__snake_case ,name=__snake_case )
return tuple(__snake_case )
| 240 | 1 |
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a ):
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if not scores:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , __a , __a , __a ) , minimax(depth + 1 , node_index * 2 + 1 , __a , __a , __a ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , __a , __a , __a ) , minimax(depth + 1 , node_index * 2 + 1 , __a , __a , __a ) , )
)
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : Optional[int] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
snake_case_ : Dict = math.log(len(__a ) , 2 )
print(f"""Optimal value : {minimax(0 , 0 , __a , __a , __a )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 364 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
snake_case_ : Optional[int] = TapasConfig.from_json_file(__a )
# set absolute/relative position embeddings parameter
snake_case_ : List[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
snake_case_ : int = TapasForQuestionAnswering(config=__a )
elif task == "WTQ":
# run_task_main.py hparams
snake_case_ : Optional[int] = 4
snake_case_ : List[str] = True
# hparam_utils.py hparams
snake_case_ : Optional[Any] = 0.664694
snake_case_ : Dict = 0.207951
snake_case_ : Tuple = 0.121194
snake_case_ : Dict = True
snake_case_ : int = True
snake_case_ : int = False
snake_case_ : str = 0.0352513
snake_case_ : int = TapasForQuestionAnswering(config=__a )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
snake_case_ : int = 4
snake_case_ : Optional[int] = False
# hparam_utils.py hparams
snake_case_ : str = 36.4519
snake_case_ : Optional[Any] = 0.903421
snake_case_ : List[Any] = 222.088
snake_case_ : Optional[int] = True
snake_case_ : Optional[Any] = True
snake_case_ : str = True
snake_case_ : int = 0.763141
snake_case_ : str = TapasForQuestionAnswering(config=__a )
elif task == "TABFACT":
snake_case_ : List[Any] = TapasForSequenceClassification(config=__a )
elif task == "MLM":
snake_case_ : Optional[int] = TapasForMaskedLM(config=__a )
elif task == "INTERMEDIATE_PRETRAINING":
snake_case_ : Tuple = TapasModel(config=__a )
else:
raise ValueError(f"""Task {task} not supported.""" )
print(f"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(__a , __a , __a )
# Save pytorch-model (weights and configuration)
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__a )
# Save tokenizer files
print(f"""Save tokenizer files to {pytorch_dump_path}""" )
snake_case_ : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=5_12 )
tokenizer.save_pretrained(__a )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 88 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : str , _A : List[str] , _A : Optional[Any]=13 , _A : List[Any]=32 , _A : Union[str, Any]=3 , _A : int=4 , _A : Optional[Any]=[10, 20, 30, 40] , _A : Optional[Any]=[2, 2, 3, 2] , _A : int=True , _A : Optional[int]=True , _A : Union[str, Any]=37 , _A : Optional[int]="gelu" , _A : Any=10 , _A : List[str]=0.02 , _A : List[Any]=["stage2", "stage3", "stage4"] , _A : List[Any]=3 , _A : str=None , ) -> Union[str, Any]:
__magic_name__ : Dict = parent
__magic_name__ : Union[str, Any] = batch_size
__magic_name__ : Tuple = image_size
__magic_name__ : Optional[int] = num_channels
__magic_name__ : int = num_stages
__magic_name__ : Tuple = hidden_sizes
__magic_name__ : List[Any] = depths
__magic_name__ : List[Any] = is_training
__magic_name__ : List[str] = use_labels
__magic_name__ : Dict = intermediate_size
__magic_name__ : int = hidden_act
__magic_name__ : Union[str, Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : Union[str, Any] = out_features
__magic_name__ : List[Any] = num_labels
__magic_name__ : Any = scope
__magic_name__ : Any = num_stages
def __lowerCAmelCase ( self : List[str] ) -> str:
__magic_name__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Union[str, Any] = None
if self.use_labels:
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : Dict ) -> Tuple:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_A , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_A , loss_ignore_index=255 , num_labels=self.num_labels , )
def __lowerCAmelCase ( self : Optional[int] , _A : int , _A : Any , _A : List[str] ) -> List[str]:
__magic_name__ : Tuple = UperNetForSemanticSegmentation(config=_A )
model.to(_A )
model.eval()
__magic_name__ : str = model(_A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
__magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) : Dict = config_and_inputs
__magic_name__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[str] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
A_ : Optional[Any] = {"""image-segmentation""": UperNetForSemanticSegmentation} if is_torch_available() else {}
A_ : List[Any] = False
A_ : List[Any] = False
A_ : int = False
A_ : Optional[Any] = False
A_ : Optional[int] = False
A_ : str = False
def __lowerCAmelCase ( self : Any ) -> List[str]:
__magic_name__ : List[str] = UperNetModelTester(self )
__magic_name__ : str = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
return
def __lowerCAmelCase ( self : Any ) -> Dict:
__magic_name__ , __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Union[str, Any] = model_class(_A )
__magic_name__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple = [*signature.parameters.keys()]
__magic_name__ : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_A )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def __lowerCAmelCase ( self : Any ) -> str:
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> str:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def __lowerCAmelCase ( self : Dict ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowerCAmelCase ( self : Any ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
pass
def __lowerCAmelCase ( self : str ) -> Optional[int]:
def check_hidden_states_output(_A : int , _A : Union[str, Any] , _A : int ):
__magic_name__ : str = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
__magic_name__ : Dict = model(**self._prepare_for_class(_A , _A ) )
__magic_name__ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__magic_name__ : str = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : str = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : List[Any] = True
check_hidden_states_output(_A , _A , _A )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
__magic_name__ , __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Any = _config_zero_init(_A )
__magic_name__ : str = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
__magic_name__ : Optional[Any] = model_class(config=_A )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason='UperNet does not have tied weights' )
def __lowerCAmelCase ( self : int ) -> List[str]:
pass
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[Any] = UperNetForSemanticSegmentation.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[str] = hf_hub_download(
repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
__magic_name__ : List[str] = Image.open(lowerCAmelCase ).convert('RGB' )
return image
@require_torch
@require_vision
@slow
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
__magic_name__ : Optional[int] = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
__magic_name__ : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(_A )
__magic_name__ : Union[str, Any] = prepare_img()
__magic_name__ : List[Any] = processor(images=_A , return_tensors='pt' ).to(_A )
with torch.no_grad():
__magic_name__ : List[str] = model(**_A )
__magic_name__ : Any = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _A )
__magic_name__ : Optional[int] = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 ) )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : int = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
__magic_name__ : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(_A )
__magic_name__ : Optional[int] = prepare_img()
__magic_name__ : List[str] = processor(images=_A , return_tensors='pt' ).to(_A )
with torch.no_grad():
__magic_name__ : Any = model(**_A )
__magic_name__ : int = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _A )
__magic_name__ : int = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 ) ) | 331 |
'''simple docstring'''
def lowerCamelCase ( ):
"""simple docstring"""
return 1
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int = 200 ):
"""simple docstring"""
return two_pound(lowerCAmelCase )
if __name__ == "__main__":
print(solution(int(input().strip()))) | 331 | 1 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__A : Union[str, Any] = HfArgumentParser(InitializationArguments)
__A : Any = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__A : Any = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__A : Dict = {
"vocab_size": len(tokenizer),
"scale_attn_by_inverse_layer_idx": True,
"reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
__A : Union[str, Any] = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__A : str = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 369 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class _a ( logging.LoggerAdapter):
"""simple docstring"""
@staticmethod
def lowercase__ ( __UpperCamelCase : Optional[Any] )->List[Any]:
_UpperCAmelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] )->int:
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
_UpperCAmelCase = kwargs.pop('''main_process_only''' , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop('''in_order''' , __UpperCamelCase )
if self.isEnabledFor(__UpperCamelCase ):
if self._should_log(__UpperCamelCase ):
_UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase )
self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
elif in_order:
_UpperCAmelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase )
self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
state.wait_for_everyone()
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = None ):
'''simple docstring'''
if log_level is None:
_UpperCAmelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = logging.getLogger(_SCREAMING_SNAKE_CASE )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_SCREAMING_SNAKE_CASE , {} )
| 326 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class A_ ( unittest.TestCase ):
lowerCAmelCase__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _lowerCAmelCase (self :Any , _UpperCamelCase :Optional[int] , _UpperCamelCase :int , _UpperCamelCase :str )-> int:
__A = TextaTextGenerationPipeline(model=_a , tokenizer=_a )
return generator, ["Something to write", "Something else"]
def _lowerCAmelCase (self :int , _UpperCamelCase :Any , _UpperCamelCase :Optional[Any] )-> Dict:
__A = generator('''Something there''' )
self.assertEqual(_a , [{'''generated_text''': ANY(_a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
__A = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_a )
self.assertEqual(
_a , [
[{'''generated_text''': ANY(_a )}, {'''generated_text''': ANY(_a )}],
[{'''generated_text''': ANY(_a )}, {'''generated_text''': ANY(_a )}],
] , )
__A = generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_a )
self.assertEqual(
_a , [
[{'''generated_text''': ANY(_a )}, {'''generated_text''': ANY(_a )}],
[{'''generated_text''': ANY(_a )}, {'''generated_text''': ANY(_a )}],
] , )
with self.assertRaises(_a ):
generator(4 )
@require_torch
def _lowerCAmelCase (self :Optional[int] )-> List[str]:
__A = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
__A = generator('''Something there''' , do_sample=_a )
self.assertEqual(_a , [{'''generated_text''': ''''''}] )
__A = 3
__A = generator(
'''Something there''' , num_return_sequences=_a , num_beams=_a , )
__A = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a , _a )
__A = generator('''This is a test''' , do_sample=_a , num_return_sequences=2 , return_tensors=_a )
self.assertEqual(
_a , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
__A = generator.model.config.eos_token_id
__A = """<pad>"""
__A = generator(
['''This is a test''', '''This is a second test'''] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , )
self.assertEqual(
_a , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def _lowerCAmelCase (self :int )-> int:
__A = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
__A = generator('''Something there''' , do_sample=_a )
self.assertEqual(_a , [{'''generated_text''': ''''''}] )
| 117 |
def lowerCAmelCase_ ( snake_case_ ):
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 | 0 |
class _lowerCAmelCase:
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: List[str] = name
UpperCamelCase_: str = val
def __str__( self ):
return f'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self , _lowerCamelCase ):
return self.val < other.val
class _lowerCAmelCase:
"""simple docstring"""
def __init__( self , _lowerCamelCase ):
UpperCamelCase_: Optional[int] = {}
UpperCamelCase_: int = {}
UpperCamelCase_: Dict = self.build_heap(_lowerCamelCase )
def __getitem__( self , _lowerCamelCase ):
return self.get_value(_lowerCamelCase )
def _a ( self , _lowerCamelCase ):
return (idx - 1) // 2
def _a ( self , _lowerCamelCase ):
return idx * 2 + 1
def _a ( self , _lowerCamelCase ):
return idx * 2 + 2
def _a ( self , _lowerCamelCase ):
return self.heap_dict[key]
def _a ( self , _lowerCamelCase ):
UpperCamelCase_: Optional[Any] = len(_lowerCamelCase ) - 1
UpperCamelCase_: str = self.get_parent_idx(_lowerCamelCase )
for idx, i in enumerate(_lowerCamelCase ):
UpperCamelCase_: Tuple = idx
UpperCamelCase_: Dict = i.val
for i in range(_lowerCamelCase , -1 , -1 ):
self.sift_down(_lowerCamelCase , _lowerCamelCase )
return array
def _a ( self , _lowerCamelCase , _lowerCamelCase ):
while True:
UpperCamelCase_: Tuple = self.get_left_child_idx(_lowerCamelCase ) # noqa: E741
UpperCamelCase_: Tuple = self.get_right_child_idx(_lowerCamelCase )
UpperCamelCase_: Tuple = idx
if l < len(_lowerCamelCase ) and array[l] < array[idx]:
UpperCamelCase_: int = l
if r < len(_lowerCamelCase ) and array[r] < array[smallest]:
UpperCamelCase_: List[str] = r
if smallest != idx:
UpperCamelCase_: Tuple = array[smallest], array[idx]
(
UpperCamelCase_
): Tuple = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
UpperCamelCase_: Tuple = smallest
else:
break
def _a ( self , _lowerCamelCase ):
UpperCamelCase_: Union[str, Any] = self.get_parent_idx(_lowerCamelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
UpperCamelCase_: Optional[Any] = self.heap[idx], self.heap[p]
UpperCamelCase_: Optional[Any] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
UpperCamelCase_: Union[str, Any] = p
UpperCamelCase_: Dict = self.get_parent_idx(_lowerCamelCase )
def _a ( self ):
return self.heap[0]
def _a ( self ):
UpperCamelCase_: Any = self.heap[-1], self.heap[0]
UpperCamelCase_: Optional[Any] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
UpperCamelCase_: Optional[Any] = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def _a ( self , _lowerCamelCase ):
self.heap.append(_lowerCamelCase )
UpperCamelCase_: str = len(self.heap ) - 1
UpperCamelCase_: List[Any] = node.val
self.sift_up(len(self.heap ) - 1 )
def _a ( self ):
return len(self.heap ) == 0
def _a ( self , _lowerCamelCase , _lowerCamelCase ):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
UpperCamelCase_: Union[str, Any] = new_value
UpperCamelCase_: Optional[Any] = new_value
self.sift_up(self.idx_of_element[node] )
A_ : Any = Node('R', -1)
A_ : Dict = Node('B', 6)
A_ : Optional[int] = Node('A', 3)
A_ : Union[str, Any] = Node('X', 1)
A_ : Optional[int] = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
A_ : Optional[int] = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod() | 369 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> np.array:
UpperCamelCase_: Dict = F'''{sampling_rate}'''
UpperCamelCase_: Any = '1'
UpperCamelCase_: Any = 'f32le'
UpperCamelCase_: Union[str, Any] = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(UpperCAmelCase__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCamelCase_: Optional[Any] = ffmpeg_process.communicate(UpperCAmelCase__ )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
UpperCamelCase_: Union[str, Any] = output_stream[0]
UpperCamelCase_: List[str] = np.frombuffer(UpperCAmelCase__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = "f32le" , ) -> Tuple:
UpperCamelCase_: Any = F'''{sampling_rate}'''
UpperCamelCase_: Union[str, Any] = '1'
if format_for_conversion == "s16le":
UpperCamelCase_: Optional[Any] = 2
elif format_for_conversion == "f32le":
UpperCamelCase_: Any = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
UpperCamelCase_: int = platform.system()
if system == "Linux":
UpperCamelCase_: Tuple = 'alsa'
UpperCamelCase_: List[str] = 'default'
elif system == "Darwin":
UpperCamelCase_: int = 'avfoundation'
UpperCamelCase_: Union[str, Any] = ':0'
elif system == "Windows":
UpperCamelCase_: Tuple = 'dshow'
UpperCamelCase_: Dict = 'default'
UpperCamelCase_: Any = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
UpperCamelCase_: Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCamelCase_: Optional[int] = _ffmpeg_stream(UpperCAmelCase__ , UpperCAmelCase__ )
for item in iterator:
yield item
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = "f32le" , ) -> Any:
if stream_chunk_s is not None:
UpperCamelCase_: List[Any] = stream_chunk_s
else:
UpperCamelCase_: Dict = chunk_length_s
UpperCamelCase_: List[str] = ffmpeg_microphone(UpperCAmelCase__ , UpperCAmelCase__ , format_for_conversion=UpperCAmelCase__ )
if format_for_conversion == "s16le":
UpperCamelCase_: Union[str, Any] = np.intaa
UpperCamelCase_: List[Any] = 2
elif format_for_conversion == "f32le":
UpperCamelCase_: str = np.floataa
UpperCamelCase_: Tuple = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
UpperCamelCase_: int = chunk_length_s / 6
UpperCamelCase_: Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(UpperCAmelCase__ , (int, float) ):
UpperCamelCase_: Union[str, Any] = [stride_length_s, stride_length_s]
UpperCamelCase_: Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCamelCase_: Dict = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCamelCase_: Optional[int] = datetime.datetime.now()
UpperCamelCase_: Optional[int] = datetime.timedelta(seconds=UpperCAmelCase__ )
for item in chunk_bytes_iter(UpperCAmelCase__ , UpperCAmelCase__ , stride=(stride_left, stride_right) , stream=UpperCAmelCase__ ):
# Put everything back in numpy scale
UpperCamelCase_: Tuple = np.frombuffer(item['raw'] , dtype=UpperCAmelCase__ )
UpperCamelCase_: Optional[int] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
UpperCamelCase_: int = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 1_0 * delta:
# We're late !! SKIP
continue
yield item
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = False ) -> int:
UpperCamelCase_: str = b''
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
UpperCamelCase_: List[str] = 0
for raw in iterator:
acc += raw
if stream and len(UpperCAmelCase__ ) < chunk_len:
UpperCamelCase_: Optional[Any] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(UpperCAmelCase__ ) >= chunk_len:
# We are flushing the accumulator
UpperCamelCase_: int = (_stride_left, stride_right)
UpperCamelCase_: Optional[Any] = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
UpperCamelCase_: Any = False
yield item
UpperCamelCase_: Optional[int] = stride_left
UpperCamelCase_: Optional[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(UpperCAmelCase__ ) > stride_left:
UpperCamelCase_: int = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
UpperCamelCase_: Optional[Any] = False
yield item
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> int:
UpperCamelCase_: Any = 2**2_4 # 16Mo
try:
with subprocess.Popen(UpperCAmelCase__ , stdout=subprocess.PIPE , bufsize=UpperCAmelCase__ ) as ffmpeg_process:
while True:
UpperCamelCase_: Any = ffmpeg_process.stdout.read(UpperCAmelCase__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error | 292 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a__ : Dict = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
a__ : Dict = {
'''gpt-neox-20b''': 2_0_4_8,
}
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : List[Any] = VOCAB_FILES_NAMES
snake_case__ : Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Union[str, Any]="<|endoftext|>" , UpperCAmelCase__ : Optional[Any]="<|endoftext|>" , UpperCAmelCase__ : List[str]="<|endoftext|>" , UpperCAmelCase__ : List[Any]=False , **UpperCAmelCase__ : Optional[int] , ) -> Optional[Any]:
super().__init__(
UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , **UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase__ ) != add_prefix_space:
__SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase__ , pre_tok_state.pop("type" ) )
__SCREAMING_SNAKE_CASE = add_prefix_space
__SCREAMING_SNAKE_CASE = pre_tok_class(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = add_prefix_space
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
__SCREAMING_SNAKE_CASE = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : "Conversation" ) -> List[int]:
__SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) + [self.eos_token_id] )
if len(UpperCAmelCase__ ) > self.model_max_length:
__SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
| 54 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
UpperCAmelCase_ : Dict = logging.getLogger(__name__)
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30522, type=int)
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
logger.info(f'''Loading data from {args.data_file}''')
with open(args.data_file, """rb""") as fp:
UpperCAmelCase_ : Union[str, Any] = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
UpperCAmelCase_ : Any = Counter()
for tk_ids in data:
counter.update(tk_ids)
UpperCAmelCase_ : List[Any] = [0] * args.vocab_size
for k, v in counter.items():
UpperCAmelCase_ : Dict = v
logger.info(f'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 91 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase : Any = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : int = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Union[str, Any] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowercase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 357 |
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
def count_of_possible_combinations(SCREAMING_SNAKE_CASE__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
def count_of_possible_combinations_with_dp_array(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowercase : Any = sum(
count_of_possible_combinations_with_dp_array(target - item , SCREAMING_SNAKE_CASE__ )
for item in array )
lowercase : Optional[int] = answer
return answer
lowercase : int = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
lowercase : str = [0] * (target + 1)
lowercase : Tuple = 1
for i in range(1 , target + 1 ):
for j in range(SCREAMING_SNAKE_CASE__ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : Any = 3
lowercase : Optional[Any] = 5
lowercase : Tuple = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 285 | 0 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
A = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
A = AutoTokenizer.from_pretrained('google/mt5-small' )
A = tokenizer('Hello there' ,return_tensors='np' ).input_ids
A = tokenizer('Hi I am' ,return_tensors='np' ).input_ids
A = shift_tokens_right(A_ ,model.config.pad_token_id ,model.config.decoder_start_token_id )
A = model(A_ ,decoder_input_ids=A_ ).logits
A = optax.softmax_cross_entropy(A_ ,onehot(A_ ,logits.shape[-1] ) ).mean()
A = -(labels.shape[-1] * loss.item())
A = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 ) | 74 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = RobertaPreLayerNormConfig.from_pretrained(
snake_case__ , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
A = torch.load(hf_hub_download(repo_id=snake_case__ , filename='pytorch_model.bin' ) )
A = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith('roberta.' ):
A = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
A = tensor_value
A = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ )
model.save_pretrained(snake_case__ )
# convert tokenizer
A = AutoTokenizer.from_pretrained(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 74 | 1 |
from numpy import exp, pi, sqrt
def UpperCAmelCase ( a_ , a_ = 0.0 , a_ = 1.0 ) -> int:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 124 |
import argparse
import os
import re
import packaging.version
SCREAMING_SNAKE_CASE :int = 'examples/'
SCREAMING_SNAKE_CASE :str = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
SCREAMING_SNAKE_CASE :int = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
SCREAMING_SNAKE_CASE :List[str] = 'README.md'
def UpperCAmelCase ( a_ , a_ , a_ ) -> Tuple:
"""simple docstring"""
with open(a_ , "r" , encoding="utf-8" , newline="\n" ) as f:
__A = f.read()
__A , __A = REPLACE_PATTERNS[pattern]
__A = replace.replace("VERSION" , a_ )
__A = re_pattern.sub(a_ , a_ )
with open(a_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(a_ )
def UpperCAmelCase ( a_ ) -> Optional[Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(a_ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(a_ , a_ ) , a_ , pattern="examples" )
def UpperCAmelCase ( a_ , a_=False ) -> str:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(a_ , a_ , a_ )
if not patch:
update_version_in_examples(a_ )
def UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
__A = "🤗 Transformers currently provides the following architectures"
__A = "1. Want to contribute a new model?"
with open(a_ , "r" , encoding="utf-8" , newline="\n" ) as f:
__A = f.readlines()
# Find the start of the list.
__A = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__A = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
__A = lines[index].replace(
"https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , )
index += 1
with open(a_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(a_ )
def UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
with open(REPLACE_FILES["init"] , "r" ) as f:
__A = f.read()
__A = REPLACE_PATTERNS["init"][0].search(a_ ).groups()[0]
return packaging.version.parse(a_ )
def UpperCAmelCase ( a_=False ) -> str:
"""simple docstring"""
__A = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
__A = default_version.base_version
elif patch:
__A = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
__A = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
__A = input(F'''Which version are you releasing? [{default_version}]''' )
if len(a_ ) == 0:
__A = default_version
print(F'''Updating version to {version}.''' )
global_version_update(a_ , patch=a_ )
def UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
__A = get_version()
__A = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
__A = current_version.base_version
# Check with the user we got that right.
__A = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(a_ ) == 0:
__A = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(a_ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
SCREAMING_SNAKE_CASE :List[str] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 124 | 1 |
'''simple docstring'''
from statistics import mean
import numpy as np
def A_ ( snake_case , snake_case , snake_case , snake_case ):
SCREAMING_SNAKE_CASE:List[Any] = 0
# Number of processes finished
SCREAMING_SNAKE_CASE:Dict = 0
# Displays the finished process.
# If it is 0, the performance is completed if it is 1, before the performance.
SCREAMING_SNAKE_CASE:str = [0] * no_of_process
# List to include calculation results
SCREAMING_SNAKE_CASE:Any = [0] * no_of_process
# Sort by arrival time.
SCREAMING_SNAKE_CASE:List[str] = [burst_time[i] for i in np.argsort(snake_case )]
SCREAMING_SNAKE_CASE:int = [process_name[i] for i in np.argsort(snake_case )]
arrival_time.sort()
while no_of_process > finished_process_count:
SCREAMING_SNAKE_CASE:Tuple = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
SCREAMING_SNAKE_CASE:Optional[Any] = arrival_time[i]
SCREAMING_SNAKE_CASE:Optional[int] = 0
# Index showing the location of the process being performed
SCREAMING_SNAKE_CASE:Dict = 0
# Saves the current response ratio.
SCREAMING_SNAKE_CASE:Tuple = 0
for i in range(0 , snake_case ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
SCREAMING_SNAKE_CASE:int = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
SCREAMING_SNAKE_CASE:Dict = temp
SCREAMING_SNAKE_CASE:List[Any] = i
# Calculate the turn around time
SCREAMING_SNAKE_CASE:List[Any] = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
SCREAMING_SNAKE_CASE:Dict = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def A_ ( snake_case , snake_case , snake_case , snake_case ):
SCREAMING_SNAKE_CASE:List[Any] = [0] * no_of_process
for i in range(0 , snake_case ):
SCREAMING_SNAKE_CASE:Any = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
A_ = 5
A_ = ["A", "B", "C", "D", "E"]
A_ = [1, 2, 3, 4, 5]
A_ = [1, 2, 3, 4, 5]
A_ = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
A_ = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
| 139 |
'''simple docstring'''
from __future__ import annotations
def A_ ( snake_case , snake_case , snake_case , ):
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 139 | 1 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class a__ :
"""simple docstring"""
def __init__(self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=99 , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=50 , __lowercase=0.0_2 , __lowercase=True , __lowercase=None , ):
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = initializer_range
__lowerCAmelCase = use_labels
__lowerCAmelCase = scope
def _snake_case (self ):
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def _snake_case (self ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def _snake_case (self ):
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = self.prepare_config_and_inputs()
__lowerCAmelCase = True
__lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase , ):
__lowerCAmelCase = BertGenerationEncoder(config=__lowercase )
model.to(__lowercase )
model.eval()
__lowerCAmelCase = model(__lowercase , attention_mask=__lowercase )
__lowerCAmelCase = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase , ):
__lowerCAmelCase = True
__lowerCAmelCase = BertGenerationEncoder(config=__lowercase )
model.to(__lowercase )
model.eval()
__lowerCAmelCase = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
__lowerCAmelCase = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase , ):
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = BertGenerationDecoder(config=__lowercase ).to(__lowercase ).eval()
# first forward pass
__lowerCAmelCase = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , use_cache=__lowercase , )
__lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCAmelCase = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , output_hidden_states=__lowercase , )['''hidden_states'''][0]
__lowerCAmelCase = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , past_key_values=__lowercase , output_hidden_states=__lowercase , )['''hidden_states'''][0]
# select random slice
__lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-3 ) )
def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , *__lowercase , ):
__lowerCAmelCase = BertGenerationDecoder(__lowercase )
model.to(__lowercase )
model.eval()
__lowerCAmelCase = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case (self ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a__ ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__UpperCamelCase : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else ()
__UpperCamelCase : List[str] = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _snake_case (self ):
__lowerCAmelCase = BertGenerationEncoderTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def _snake_case (self ):
self.config_tester.run_common_tests()
def _snake_case (self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def _snake_case (self ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
__lowerCAmelCase = '''bert'''
self.model_tester.create_and_check_model(__lowercase , __lowercase , __lowercase , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__lowercase )
def _snake_case (self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__lowercase )
def _snake_case (self ):
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowerCAmelCase = None
self.model_tester.create_and_check_model_as_decoder(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , )
def _snake_case (self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__lowercase )
@slow
def _snake_case (self ):
__lowerCAmelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(__lowercase )
@require_torch
class a__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case (self ):
__lowerCAmelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
__lowerCAmelCase = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
__lowerCAmelCase = model(__lowercase )[0]
__lowerCAmelCase = torch.Size([1, 8, 10_24] )
self.assertEqual(output.shape , __lowercase )
__lowerCAmelCase = torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1e-4 ) )
@require_torch
class a__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case (self ):
__lowerCAmelCase = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
__lowerCAmelCase = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
__lowerCAmelCase = model(__lowercase )[0]
__lowerCAmelCase = torch.Size([1, 8, 5_03_58] )
self.assertEqual(output.shape , __lowercase )
__lowerCAmelCase = torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1e-4 ) )
| 371 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : str = 'roberta'
def __init__(self , __lowercase=5_02_65 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ):
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = use_cache
__lowerCAmelCase = classifier_dropout
class a__ ( __A ):
"""simple docstring"""
@property
def _snake_case (self ):
if self.task == "multiple-choice":
__lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowerCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 9 | 0 |
from math import factorial
snake_case_ = {str(d): factorial(d) for d in range(10)}
def lowerCamelCase__ ( snake_case_ : int ) -> int:
return sum(DIGIT_FACTORIAL[d] for d in str(snake_case_ ) )
def lowerCamelCase__ ( ) -> int:
__snake_case = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , snake_case_ ) if sum_of_digit_factorial(snake_case_ ) == i )
if __name__ == "__main__":
print(F'{solution() = }')
| 24 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def _snake_case ( UpperCamelCase : Dataset , UpperCamelCase : Dict[str, str] ):
UpperCAmelCase : Any = args.log_outputs
UpperCAmelCase : Any = """_""".join(args.dataset.split("""/""" ) + [args.config, args.split] )
# load metric
UpperCAmelCase : List[Any] = load_metric("""wer""" )
UpperCAmelCase : Any = load_metric("""cer""" )
# compute metrics
UpperCAmelCase : int = wer.compute(references=result["""target"""] , predictions=result["""prediction"""] )
UpperCAmelCase : str = cer.compute(references=result["""target"""] , predictions=result["""prediction"""] )
# print & log results
UpperCAmelCase : Tuple = F"WER: {wer_result}\nCER: {cer_result}"
print(UpperCamelCase )
with open(F"{dataset_id}_eval_results.txt" , """w""" ) as f:
f.write(UpperCamelCase )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
UpperCAmelCase : str = F"log_{dataset_id}_predictions.txt"
UpperCAmelCase : Tuple = F"log_{dataset_id}_targets.txt"
with open(UpperCamelCase , """w""" ) as p, open(UpperCamelCase , """w""" ) as t:
# mapping function to write output
def write_to_file(UpperCamelCase : List[Any] , UpperCamelCase : List[Any] ):
p.write(F"{i}" + """\n""" )
p.write(batch["""prediction"""] + """\n""" )
t.write(F"{i}" + """\n""" )
t.write(batch["""target"""] + """\n""" )
result.map(UpperCamelCase , with_indices=UpperCamelCase )
def _snake_case ( UpperCamelCase : str ):
UpperCAmelCase : List[str] = """[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
UpperCAmelCase : Dict = re.sub(UpperCamelCase , """""" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
UpperCAmelCase : List[str] = ["""\n\n""", """\n""", """ """, """ """]
for t in token_sequences_to_ignore:
UpperCAmelCase : Optional[Any] = """ """.join(text.split(UpperCamelCase ) )
return text
def _snake_case ( UpperCamelCase : Tuple ):
# load dataset
UpperCAmelCase : Union[str, Any] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=UpperCamelCase )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(args.model_id )
UpperCAmelCase : Any = feature_extractor.sampling_rate
# resample audio
UpperCAmelCase : List[str] = dataset.cast_column("""audio""" , Audio(sampling_rate=UpperCamelCase ) )
# load eval pipeline
if args.device is None:
UpperCAmelCase : Optional[int] = 0 if torch.cuda.is_available() else -1
UpperCAmelCase : Tuple = pipeline("""automatic-speech-recognition""" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(UpperCamelCase : Any ):
UpperCAmelCase : Any = asr(
batch["""audio"""]["""array"""] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
UpperCAmelCase : Tuple = prediction["""text"""]
UpperCAmelCase : List[str] = normalize_text(batch["""sentence"""] )
return batch
# run inference on all examples
UpperCAmelCase : int = dataset.map(UpperCamelCase , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
A: List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
A: Union[str, Any] = parser.parse_args()
main(args)
| 109 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _lowercase ( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = StableUnCLIPImgaImgPipeline
__A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
__A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__A = frozenset([] )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = 32
a = embedder_hidden_size
# image encoding components
a = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
a = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase_ , projection_dim=lowerCamelCase_ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase_ )
a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
a = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase_ , layers_per_block=1 , upcast_attention=lowerCamelCase_ , use_linear_projection=lowerCamelCase_ , )
torch.manual_seed(0 )
a = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
a = AutoencoderKL()
a = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=0 , lowerCamelCase_=True ):
"""simple docstring"""
if str(lowerCamelCase_ ).startswith("mps" ):
a = torch.manual_seed(lowerCamelCase_ )
else:
a = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if pil_image:
a = input_image * 0.5 + 0.5
a = input_image.clamp(0 , 1 )
a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
a = DiffusionPipeline.numpy_to_pil(lowerCamelCase_ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def UpperCamelCase_ (self ):
"""simple docstring"""
a = "cpu" # ensure determinism for the device-dependent torch.Generator
a = self.get_dummy_components()
a = StableUnCLIPImgaImgPipeline(**lowerCamelCase_ )
a = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a = self.get_dummy_inputs(lowerCamelCase_ )
inputs.update({"image_embeds": None} )
a = sd_pipe(**lowerCamelCase_ ).images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase_ (self ):
"""simple docstring"""
a = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase_ )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase_ (self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase_ )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase_ (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ (self ):
"""simple docstring"""
a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a = torch.Generator(device="cpu" ).manual_seed(0 )
a = pipe(lowerCamelCase_ , "anime turle" , generator=lowerCamelCase_ , output_type="np" )
a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a = torch.Generator(device="cpu" ).manual_seed(0 )
a = pipe(lowerCamelCase_ , "anime turle" , generator=lowerCamelCase_ , output_type="np" )
a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
a = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a = pipe(
lowerCamelCase_ , "anime turtle" , num_inference_steps=2 , output_type="np" , )
a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 71 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_lowercase: List[Any] = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def a( A : List[str] ) -> Union[str, Any]:
"""simple docstring"""
a = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(A , A )
_lowercase: Optional[int] = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def a( A : str ) -> Union[str, Any]:
"""simple docstring"""
a = list(s_dict.keys() )
for key in keys:
a = key
for k, v in WHISPER_MAPPING.items():
if k in key:
a = new_key.replace(A , A )
print(f'''{key} -> {new_key}''' )
a = s_dict.pop(A )
return s_dict
def a( A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
a , a = emb.weight.shape
a = nn.Linear(A , A , bias=A )
a = emb.weight.data
return lin_layer
def a( A : str , A : str ) -> bytes:
"""simple docstring"""
os.makedirs(A , exist_ok=A )
a = os.path.basename(A )
a = url.split("/" )[-2]
a = os.path.join(A , A )
if os.path.exists(A ) and not os.path.isfile(A ):
raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
if os.path.isfile(A ):
a = open(A , "rb" ).read()
if hashlib.shaaaa(A ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(A ) as source, open(A , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=A , unit_divisor=1024 ) as loop:
while True:
a = source.read(8192 )
if not buffer:
break
output.write(A )
loop.update(len(A ) )
a = open(A , "rb" ).read()
if hashlib.shaaaa(A ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." )
return model_bytes
def a( A : List[str] , A : Union[str, Any] ) -> str:
"""simple docstring"""
if ".pt" not in checkpoint_path:
a = _download(_MODELS[checkpoint_path] )
else:
a = torch.load(A , map_location="cpu" )
a = original_checkpoint["dims"]
a = original_checkpoint["model_state_dict"]
a = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(A )
rename_keys(A )
a = True
a = state_dict["decoder.layers.0.fc1.weight"].shape[0]
a = WhisperConfig(
vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=A , decoder_ffn_dim=A , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , )
a = WhisperForConditionalGeneration(A )
a , a = model.model.load_state_dict(A , strict=A )
if len(A ) > 0 and not set(A ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
f''' but all the following weights are missing {missing}''' )
if tie_embeds:
a = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
a = proj_out_weights
model.save_pretrained(A )
if __name__ == "__main__":
_lowercase: Dict = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
_lowercase: List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 71 | 1 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case__ : Tuple = logging.get_logger(__name__)
class A_ ( _lowerCamelCase ):
lowerCAmelCase__ = ["""pixel_values"""]
def __init__(self :Any , _UpperCamelCase :bool = True , _UpperCamelCase :Dict[str, int] = None , _UpperCamelCase :PILImageResampling = PILImageResampling.BICUBIC , _UpperCamelCase :bool = True , _UpperCamelCase :Dict[str, int] = None , _UpperCamelCase :bool = True , _UpperCamelCase :Union[int, float] = 1 / 255 , _UpperCamelCase :bool = True , _UpperCamelCase :Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _UpperCamelCase :Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_UpperCamelCase :Optional[int] , )-> None:
super().__init__(**_UpperCamelCase )
__A = size if size is not None else {'''shortest_edge''': 224}
__A = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
__A = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__A = get_size_dict(_UpperCamelCase , param_name='''crop_size''' )
__A = do_resize
__A = size
__A = resample
__A = do_center_crop
__A = crop_size
__A = do_rescale
__A = rescale_factor
__A = do_normalize
__A = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__A = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _lowerCAmelCase (self :Optional[Any] , _UpperCamelCase :np.ndarray , _UpperCamelCase :Dict[str, int] , _UpperCamelCase :PILImageResampling = PILImageResampling.BICUBIC , _UpperCamelCase :Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase :int , )-> np.ndarray:
__A = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__A = int((256 / 224) * size['''shortest_edge'''] )
__A = get_resize_output_image_size(_UpperCamelCase , size=_UpperCamelCase , default_to_square=_UpperCamelCase )
__A = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
_UpperCamelCase , size=(size_dict['''height'''], size_dict['''width''']) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def _lowerCAmelCase (self :Optional[Any] , _UpperCamelCase :np.ndarray , _UpperCamelCase :Dict[str, int] , _UpperCamelCase :Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase :Dict , )-> np.ndarray:
__A = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def _lowerCAmelCase (self :Any , _UpperCamelCase :np.ndarray , _UpperCamelCase :Union[int, float] , _UpperCamelCase :Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase :Optional[int] , )-> np.ndarray:
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def _lowerCAmelCase (self :List[Any] , _UpperCamelCase :np.ndarray , _UpperCamelCase :Union[float, List[float]] , _UpperCamelCase :Union[float, List[float]] , _UpperCamelCase :Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase :List[str] , )-> np.ndarray:
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def _lowerCAmelCase (self :List[str] , _UpperCamelCase :ImageInput , _UpperCamelCase :Optional[bool] = None , _UpperCamelCase :Optional[Dict[str, int]] = None , _UpperCamelCase :PILImageResampling = None , _UpperCamelCase :Optional[bool] = None , _UpperCamelCase :Optional[Dict[str, int]] = None , _UpperCamelCase :Optional[bool] = None , _UpperCamelCase :Optional[float] = None , _UpperCamelCase :Optional[bool] = None , _UpperCamelCase :Optional[Union[float, Iterable[float]]] = None , _UpperCamelCase :Optional[Union[float, Iterable[float]]] = None , _UpperCamelCase :Optional[TensorType] = None , _UpperCamelCase :ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase :str , )-> BatchFeature:
__A = do_resize if do_resize is not None else self.do_resize
__A = resample if resample is not None else self.resample
__A = do_center_crop if do_center_crop is not None else self.do_center_crop
__A = do_rescale if do_rescale is not None else self.do_rescale
__A = rescale_factor if rescale_factor is not None else self.rescale_factor
__A = do_normalize if do_normalize is not None else self.do_normalize
__A = image_mean if image_mean is not None else self.image_mean
__A = image_std if image_std is not None else self.image_std
__A = size if size is not None else self.size
__A = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
__A = crop_size if crop_size is not None else self.crop_size
__A = get_size_dict(_UpperCamelCase , param_name='''crop_size''' )
__A = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__A = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
__A = [self.resize(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for image in images]
if do_center_crop:
__A = [self.center_crop(_UpperCamelCase , _UpperCamelCase ) for image in images]
if do_rescale:
__A = [self.rescale(_UpperCamelCase , _UpperCamelCase ) for image in images]
if do_normalize:
__A = [self.normalize(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for image in images]
__A = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
__A = {'''pixel_values''': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
| 117 |
from __future__ import annotations
def _a ( lowerCamelCase: list[float] , lowerCamelCase: Tuple ) -> List[str]:
'''simple docstring'''
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(lowerCamelCase ):
print(F"""{i}\t\t{d}""" )
def _a ( lowerCamelCase: list[dict[str, int]] , lowerCamelCase: list[float] , lowerCamelCase: int ) -> Union[str, Any]:
'''simple docstring'''
for j in range(lowerCamelCase ):
__A , __A , __A = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def _a ( lowerCamelCase: list[dict[str, int]] , lowerCamelCase: int , lowerCamelCase: int , lowerCamelCase: int ) -> list[float]:
'''simple docstring'''
__A = [float('''inf''' )] * vertex_count
__A = 0.0
for _ in range(vertex_count - 1 ):
for j in range(lowerCamelCase ):
__A , __A , __A = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__A = distance[u] + w
__A = check_negative_cycle(lowerCamelCase , lowerCamelCase , lowerCamelCase )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : Dict = int(input('Enter number of vertices: ').strip())
snake_case__ : Optional[int] = int(input('Enter number of edges: ').strip())
snake_case__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
snake_case__ , snake_case__ , snake_case__ : Dict = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
snake_case__ : List[Any] = {'src': src, 'dst': dest, 'weight': weight}
snake_case__ : Union[str, Any] = int(input('\nEnter shortest path source:').strip())
snake_case__ : List[Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 117 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
SCREAMING_SNAKE_CASE :Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE :Optional[int] = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE :List[str] = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = RobertaTokenizer
def __init__( self : Tuple ,A : int=None ,A : Dict=None ,A : Optional[int]=None ,A : Tuple="replace" ,A : str="<s>" ,A : str="</s>" ,A : Union[str, Any]="</s>" ,A : int="<s>" ,A : str="<unk>" ,A : int="<pad>" ,A : Union[str, Any]="<mask>" ,A : List[Any]=False ,A : Tuple=True ,**A : Dict ,):
super().__init__(
A ,A ,tokenizer_file=A ,errors=A ,bos_token=A ,eos_token=A ,sep_token=A ,cls_token=A ,unk_token=A ,pad_token=A ,mask_token=A ,add_prefix_space=A ,trim_offsets=A ,**A ,)
__A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" ,A ) != add_prefix_space:
__A = getattr(A ,pre_tok_state.pop("type" ) )
__A = add_prefix_space
__A = pre_tok_class(**A )
__A = add_prefix_space
__A = "post_processor"
__A = getattr(self.backend_tokenizer ,A ,A )
if tokenizer_component_instance:
__A = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__A = tuple(state["sep"] )
if "cls" in state:
__A = tuple(state["cls"] )
__A = False
if state.get("add_prefix_space" ,A ) != add_prefix_space:
__A = add_prefix_space
__A = True
if state.get("trim_offsets" ,A ) != trim_offsets:
__A = trim_offsets
__A = True
if changes_to_apply:
__A = getattr(A ,state.pop("type" ) )
__A = component_class(**A )
setattr(self.backend_tokenizer ,A ,A )
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase_ ( self : Tuple ,A : List[Any] ):
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else value
__A = value
def UpperCamelCase_ ( self : Dict ,*A : List[str] ,**A : List[str] ):
__A = kwargs.get("is_split_into_words" ,A )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A ,**A )
def UpperCamelCase_ ( self : Union[str, Any] ,*A : str ,**A : int ):
__A = kwargs.get("is_split_into_words" ,A )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A ,**A )
def UpperCamelCase_ ( self : str ,A : str ,A : Optional[str] = None ):
__A = self._tokenizer.model.save(A ,name=A )
return tuple(A )
def UpperCamelCase_ ( self : Union[str, Any] ,A : Tuple ,A : Tuple=None ):
__A = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase_ ( self : Any ,A : List[int] ,A : Optional[List[int]] = None ):
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 124 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE :int = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Dict = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :int = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Optional[int] = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
SCREAMING_SNAKE_CASE :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 124 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : Optional[Any]= {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any]= [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_a : List[Any]= _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 172 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
lowerCAmelCase__ = {'''bert_for_seq_generation''': 512}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : List[int] = []
SCREAMING_SNAKE_CASE : Any = ['input_ids', 'attention_mask']
def __init__( self : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : Tuple="<s>" ,lowercase__ : Union[str, Any]="</s>" ,lowercase__ : str="<unk>" ,lowercase__ : Tuple="<pad>" ,lowercase__ : Union[str, Any]="<::::>" ,lowercase__ : Optional[Dict[str, Any]] = None ,**lowercase__ : Any ,):
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowercase__ ,eos_token=lowercase__ ,unk_token=lowercase__ ,pad_token=lowercase__ ,sep_token=lowercase__ ,sp_model_kwargs=self.sp_model_kwargs ,**lowercase__ ,)
__lowercase = vocab_file
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return self.sp_model.get_piece_size()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : Optional[int] ,lowercase__ : Optional[Any] ):
__lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : str ):
return self.sp_model.encode(lowercase__ ,out_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Union[str, Any] ):
return self.sp_model.piece_to_id(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Tuple ):
__lowercase = self.sp_model.IdToPiece(lowercase__ )
return token
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ):
__lowercase = []
__lowercase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase__ ) + token
__lowercase = []
else:
current_sub_tokens.append(lowercase__ )
out_string += self.sp_model.decode(lowercase__ )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
if not os.path.isdir(lowercase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ ,'''wb''' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
| 104 | 0 |
"""simple docstring"""
import qiskit
def lowercase_ ( _lowerCamelCase: int , _lowerCamelCase: int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
__lowerCamelCase : Any = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
__lowerCamelCase : Dict = qiskit.QuantumCircuit(_lowerCamelCase , _lowerCamelCase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__lowerCamelCase : Dict = qiskit.execute(_lowerCamelCase , _lowerCamelCase , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_lowerCamelCase )
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""") | 64 | """simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: str ) -> str | Literal[False]:
'''simple docstring'''
__lowerCamelCase : Optional[int] = list(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = list(_lowerCamelCase )
__lowerCamelCase : Tuple = 0
for i in range(len(_lowerCamelCase ) ):
if lista[i] != lista[i]:
count += 1
__lowerCamelCase : Optional[int] = "_"
if count > 1:
return False
else:
return "".join(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: list[str] ) -> list[str]:
'''simple docstring'''
__lowerCamelCase : List[Any] = []
while True:
__lowerCamelCase : Dict = ["$"] * len(_lowerCamelCase )
__lowerCamelCase : Any = []
for i in range(len(_lowerCamelCase ) ):
for j in range(i + 1 , len(_lowerCamelCase ) ):
__lowerCamelCase : str = compare_string(binary[i] , binary[j] )
if k is False:
__lowerCamelCase : str = "*"
__lowerCamelCase : Union[str, Any] = "*"
temp.append("X" )
for i in range(len(_lowerCamelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowerCamelCase ) == 0:
return pi
__lowerCamelCase : Tuple = list(set(_lowerCamelCase ) )
def lowercase_ ( _lowerCamelCase: int , _lowerCamelCase: Sequence[float] ) -> list[str]:
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = []
for minterm in minterms:
__lowerCamelCase : Union[str, Any] = ""
for _ in range(_lowerCamelCase ):
__lowerCamelCase : Tuple = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowerCamelCase )
return temp
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: str , _lowerCamelCase: int ) -> bool:
'''simple docstring'''
__lowerCamelCase : Tuple = list(_lowerCamelCase )
__lowerCamelCase : Optional[int] = list(_lowerCamelCase )
__lowerCamelCase : str = 0
for i in range(len(_lowerCamelCase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowercase_ ( _lowerCamelCase: list[list[int]] , _lowerCamelCase: list[str] ) -> list[str]:
'''simple docstring'''
__lowerCamelCase : Optional[int] = []
__lowerCamelCase : str = [0] * len(_lowerCamelCase )
for i in range(len(chart[0] ) ):
__lowerCamelCase : List[str] = 0
__lowerCamelCase : Optional[Any] = -1
for j in range(len(_lowerCamelCase ) ):
if chart[j][i] == 1:
count += 1
__lowerCamelCase : List[Any] = j
if count == 1:
__lowerCamelCase : Optional[Any] = 1
for i in range(len(_lowerCamelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowerCamelCase ) ):
__lowerCamelCase : List[Any] = 0
temp.append(prime_implicants[i] )
while True:
__lowerCamelCase : str = 0
__lowerCamelCase : Dict = -1
__lowerCamelCase : Tuple = 0
for i in range(len(_lowerCamelCase ) ):
__lowerCamelCase : Union[str, Any] = chart[i].count(1 )
if count_n > max_n:
__lowerCamelCase : Optional[int] = count_n
__lowerCamelCase : List[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowerCamelCase ) ):
__lowerCamelCase : Any = 0
def lowercase_ ( _lowerCamelCase: list[str] , _lowerCamelCase: list[str] ) -> list[list[int]]:
'''simple docstring'''
__lowerCamelCase : Dict = [[0 for x in range(len(_lowerCamelCase ) )] for x in range(len(_lowerCamelCase ) )]
for i in range(len(_lowerCamelCase ) ):
__lowerCamelCase : List[str] = prime_implicants[i].count("_" )
for j in range(len(_lowerCamelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowerCamelCase ):
__lowerCamelCase : Dict = 1
return chart
def lowercase_ ( ) -> None:
'''simple docstring'''
__lowerCamelCase : Any = int(input("Enter the no. of variables\n" ) )
__lowerCamelCase : List[str] = [
float(_lowerCamelCase )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
__lowerCamelCase : List[str] = decimal_to_binary(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : str = check(_lowerCamelCase )
print("Prime Implicants are:" )
print(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = prime_implicant_chart(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : Any = selection(_lowerCamelCase , _lowerCamelCase )
print("Essential Prime Implicants are:" )
print(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 64 | 1 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : int = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
lowercase__ : Optional[Any] = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
lowercase__ : int = {
'abeja/gpt-neox-japanese-2.7b': 2_0_4_8,
}
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict ) -> str:
"""simple docstring"""
with open(lowerCAmelCase__ , 'r' , encoding='utf-8' ) as f:
lowerCAmelCase_ : Tuple = json.loads(f.read() )
lowerCAmelCase_ : List[str] = collections.OrderedDict()
lowerCAmelCase_ : int = collections.OrderedDict()
lowerCAmelCase_ : Any = collections.OrderedDict()
with open(lowerCAmelCase__ , 'r' , encoding='utf-8' ) as f:
lowerCAmelCase_ : Dict = f.readlines()
lowerCAmelCase_ : str = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(lowerCAmelCase__ ):
lowerCAmelCase_ : List[Any] = b
lowerCAmelCase_ : Optional[int] = idx
for wd in b:
lowerCAmelCase_ : int = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple="<|endoftext|>" , SCREAMING_SNAKE_CASE_ : List[str]="<|endoftext|>" , SCREAMING_SNAKE_CASE_ : Any="<|startoftext|>" , SCREAMING_SNAKE_CASE_ : List[Any]="<|endoftext|>" , SCREAMING_SNAKE_CASE_ : Optional[int]=False , **SCREAMING_SNAKE_CASE_ : Dict , ):
super().__init__(
unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , do_clean_text=__UpperCamelCase , **__UpperCamelCase , )
if not os.path.isfile(__UpperCamelCase ):
raise ValueError(
F"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(__UpperCamelCase ):
raise ValueError(
F"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
lowerCAmelCase_ : Optional[int] = do_clean_text
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : Optional[int] = load_vocab_and_emoji(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase_ : str = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : Tuple ):
return self.subword_tokenizer.tokenize(__UpperCamelCase , clean=self.do_clean_text )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
return self.vocab.get(__UpperCamelCase , self.vocab.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any ):
return self.subword_tokenizer.convert_id_to_token(__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase_ : Any = ''.join(__UpperCamelCase ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : "Conversation" ):
lowerCAmelCase_ : Any = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) + [self.eos_token_id] )
if len(__UpperCamelCase ) > self.model_max_length:
lowerCAmelCase_ : Optional[int] = input_ids[-self.model_max_length :]
return input_ids
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
lowerCAmelCase_ : Dict = 0
if os.path.isdir(__UpperCamelCase ):
lowerCAmelCase_ : Union[str, Any] = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase_ : Any = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
lowerCAmelCase_ : List[Any] = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
lowerCAmelCase_ : Optional[int] = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase_ : int = token_index
writer.write(','.join(__UpperCamelCase ) + '\n' )
index += 1
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as writer:
json.dump(self.emoji , __UpperCamelCase )
return vocab_file, emoji_file
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str ):
lowerCAmelCase_ : Optional[int] = vocab # same as swe
lowerCAmelCase_ : Tuple = ids_to_tokens # same as bpe
lowerCAmelCase_ : int = emoji
lowerCAmelCase_ : List[str] = np.max([len(__UpperCamelCase ) for w in self.vocab.keys()] )
lowerCAmelCase_ : Dict = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
lowerCAmelCase_ : Dict = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
lowerCAmelCase_ : Optional[int] = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
lowerCAmelCase_ : Optional[Any] = re.compile(
r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
lowerCAmelCase_ : List[Any] = re.compile(
r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
lowerCAmelCase_ : Any = re.compile(
r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
lowerCAmelCase_ : int = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
lowerCAmelCase_ : Any = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
lowerCAmelCase_ : List[Any] = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self : Any ):
return len(self.ids_to_tokens )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : Any ):
lowerCAmelCase_ : Union[str, Any] = self.content_repattera.sub('<URL>' , __UpperCamelCase )
lowerCAmelCase_ : Dict = self.content_repattera.sub('<EMAIL>' , __UpperCamelCase )
lowerCAmelCase_ : Union[str, Any] = self.content_repattera.sub('<TEL>' , __UpperCamelCase )
lowerCAmelCase_ : Optional[Any] = self.content_repattera.sub('<DATE>' , __UpperCamelCase )
lowerCAmelCase_ : List[Any] = self.content_repattera.sub('<DATE>' , __UpperCamelCase )
lowerCAmelCase_ : Tuple = self.content_repattera.sub('<PRICE>' , __UpperCamelCase )
lowerCAmelCase_ : Dict = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
lowerCAmelCase_ : Optional[int] = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple=False ):
lowerCAmelCase_ : List[str] = text.replace(' ' , '<SP>' )
lowerCAmelCase_ : Optional[Any] = text.replace(' ' , '<SP>' )
lowerCAmelCase_ : Optional[int] = text.replace('\r\n' , '<BR>' )
lowerCAmelCase_ : Tuple = text.replace('\n' , '<BR>' )
lowerCAmelCase_ : Optional[int] = text.replace('\r' , '<BR>' )
lowerCAmelCase_ : Tuple = text.replace('\t' , '<TAB>' )
lowerCAmelCase_ : int = text.replace('—' , 'ー' )
lowerCAmelCase_ : int = text.replace('−' , 'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
lowerCAmelCase_ : Union[str, Any] = text.replace(__UpperCamelCase , __UpperCamelCase )
if clean:
lowerCAmelCase_ : int = self.clean_text(__UpperCamelCase )
def check_simbol(SCREAMING_SNAKE_CASE_ : List[str] ):
lowerCAmelCase_ : Union[str, Any] = x.encode()
if len(__UpperCamelCase ) == 1 and len(__UpperCamelCase ) == 2:
lowerCAmelCase_ : str = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2_A1 and c <= 0XC2_BF)
or (c >= 0XC7_80 and c <= 0XC7_83)
or (c >= 0XCA_B9 and c <= 0XCB_BF)
or (c >= 0XCC_80 and c <= 0XCD_A2)
):
return True
return False
def checkuae(SCREAMING_SNAKE_CASE_ : Any ):
lowerCAmelCase_ : Dict = x.encode()
if len(__UpperCamelCase ) == 1 and len(__UpperCamelCase ) == 3:
lowerCAmelCase_ : List[str] = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE2_80_80 and c <= 0XE2_B0_7F:
return True
return False
lowerCAmelCase_ : List[str] = 0
lowerCAmelCase_ : Union[str, Any] = []
while pos < len(__UpperCamelCase ):
lowerCAmelCase_ : Optional[int] = min(len(__UpperCamelCase ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
lowerCAmelCase_ : Dict = [] # (token_id, token, pos)
for e in range(__UpperCamelCase , __UpperCamelCase , -1 ):
lowerCAmelCase_ : Optional[Any] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__UpperCamelCase ) > 2:
lowerCAmelCase_ : Any = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(__UpperCamelCase ) > 0:
# the smallest token_id is adopted
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : Any = sorted(__UpperCamelCase , key=lambda SCREAMING_SNAKE_CASE_ : x[0] )[0]
result.append(__UpperCamelCase )
lowerCAmelCase_ : Union[str, Any] = e
else:
lowerCAmelCase_ : Optional[int] = pos + 1
lowerCAmelCase_ : List[Any] = text[pos:end]
if check_simbol(__UpperCamelCase ):
result.append('<KIGOU>' )
elif checkuae(__UpperCamelCase ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
lowerCAmelCase_ : List[str] = end
return result
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str]="\n" ):
lowerCAmelCase_ : str = []
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : List[str] = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__UpperCamelCase ) > 0:
words.append(bytearray(__UpperCamelCase ).decode('utf-8' , errors='replace' ) )
lowerCAmelCase_ : Optional[Any] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(__UpperCamelCase )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
words.append(bytearray(__UpperCamelCase ).decode('utf-8' , errors='replace' ) )
lowerCAmelCase_ : int = ''.join(__UpperCamelCase )
return text
| 224 |
"""simple docstring"""
from math import isqrt, loga
def A__ ( UpperCamelCase ):
A = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , UpperCamelCase , UpperCamelCase ):
A = False
return [i for i in range(2 , UpperCamelCase ) if is_prime[i]]
def A__ ( UpperCamelCase = 800_800 , UpperCamelCase = 800_800 ):
A = degree * loga(UpperCamelCase )
A = int(UpperCamelCase )
A = calculate_prime_numbers(UpperCamelCase )
A = 0
A = 0
A = len(UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 292 | 0 |
"""simple docstring"""
from __future__ import annotations
def __lowercase ( snake_case_ : list[int] ,snake_case_ : int ) ->list[list[int]]:
'''simple docstring'''
__A : list[list[int]] = []
__A : list[int] = []
__A : Union[str, Any] = 0
__A : Dict = sum(snake_case_ )
create_state_space_tree(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ )
return result
def __lowercase ( snake_case_ : list[int] ,snake_case_ : int ,snake_case_ : int ,snake_case_ : list[int] ,snake_case_ : list[list[int]] ,snake_case_ : int ,) ->None:
'''simple docstring'''
if sum(snake_case_ ) > max_sum or (remaining_nums_sum + sum(snake_case_ )) < max_sum:
return
if sum(snake_case_ ) == max_sum:
result.append(snake_case_ )
return
for index in range(snake_case_ ,len(snake_case_ ) ):
create_state_space_tree(
snake_case_ ,snake_case_ ,index + 1 ,[*path, nums[index]] ,snake_case_ ,remaining_nums_sum - nums[index] ,)
a_ = [3, 34, 4, 12, 5, 2]
a_ = 9
a_ = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 291 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowercase ( snake_case_ : int ) ->str:
'''simple docstring'''
if not isinstance(snake_case_ ,snake_case_ ):
raise TypeError('''Undefined for non-integers''' )
elif precision < 1:
raise ValueError('''Undefined for non-natural numbers''' )
__A : int = precision
__A : Tuple = ceil(precision / 14 )
__A : Dict = 426880 * Decimal(10005 ).sqrt()
__A : Optional[Any] = 1
__A : int = 13591409
__A : Optional[int] = Decimal(snake_case_ )
for k in range(1 ,snake_case_ ):
__A : int = factorial(6 * k ) // (factorial(3 * k ) * factorial(snake_case_ ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
a_ = 50
print(f'''The first {n} digits of pi is: {pi(n)}''')
| 291 | 1 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> list[int]:
'''simple docstring'''
UpperCAmelCase = 2
UpperCAmelCase = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(UpperCamelCase__ )
if n > 1:
factors.append(UpperCamelCase__ )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 273 |
from datetime import datetime
import requests
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> bytes:
'''simple docstring'''
UpperCAmelCase = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
UpperCAmelCase = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(UpperCamelCase__ ).content
if __name__ == "__main__":
__A : Union[str, Any] = input("Enter Video/IGTV url: ").strip()
__A : Tuple = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F'Done. Video saved to disk as {file_name}.')
| 273 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCamelCase :str = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**lowercase )
A_ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224}
A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Optional[Any] = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
A_ : Tuple = get_size_dict(lowercase , param_name="""crop_size""" )
A_ : List[Any] = do_resize
A_ : List[str] = size
A_ : Dict = resample
A_ : int = do_rescale
A_ : str = rescale_factor
A_ : Tuple = do_center_crop
A_ : Tuple = crop_size
A_ : List[str] = do_flip_channel_order
def _a (self , lowercase , lowercase , lowercase = PIL.Image.BILINEAR , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Any = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Tuple = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None ):
return flip_channel_order(lowercase , data_format=lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : str = do_resize if do_resize is not None else self.do_resize
A_ : Optional[int] = resample if resample is not None else self.resample
A_ : str = do_rescale if do_rescale is not None else self.do_rescale
A_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Dict = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
A_ : Union[str, Any] = size if size is not None else self.size
A_ : Dict = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : Union[str, Any] = get_size_dict(lowercase , param_name="""crop_size""" )
A_ : Union[str, Any] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
A_ : Optional[Any] = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : List[Any] = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A_ : str = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A_ : Dict = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
A_ : List[str] = [self.flip_channel_order(image=lowercase ) for image in images]
A_ : List[str] = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : str = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
def _a (self , lowercase , lowercase = None ):
A_ : Optional[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase ) != len(lowercase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowercase ):
A_ : Dict = target_sizes.numpy()
A_ : Union[str, Any] = []
for idx in range(len(lowercase ) ):
A_ : Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=lowercase )
A_ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase )
else:
A_ : str = logits.argmax(dim=1 )
A_ : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 135 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase :Dict = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :str = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
lowerCamelCase :Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 135 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( A : List[Any], A : Tuple, A : int, A : List[str] ):
'''simple docstring'''
a = FunnelConfig.from_json_file(A )
print(F"""Building PyTorch model from configuration: {config}""" )
a = FunnelBaseModel(A ) if base_model else FunnelModel(A )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(A, A, A )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict(), A )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 107 | """simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 261 | 0 |
"""simple docstring"""
import os
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = "input.txt" ):
with open(os.path.join(os.path.dirname(__UpperCAmelCase ) , __UpperCAmelCase ) ) as input_file:
_lowercase : Dict = [
[int(__UpperCAmelCase ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
_lowercase : Optional[int] = len(__UpperCAmelCase )
_lowercase : Tuple = len(matrix[0] )
_lowercase : Any = [[-1 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )]
for i in range(__UpperCAmelCase ):
_lowercase : Optional[int] = matrix[i][0]
for j in range(1 , __UpperCAmelCase ):
for i in range(__UpperCAmelCase ):
_lowercase : List[str] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __UpperCAmelCase ):
_lowercase : Dict = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
_lowercase : List[str] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }')
| 336 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
UpperCAmelCase: Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCAmelCase: Tuple = [0, 25, 50]
UpperCAmelCase: List[Any] = [25, 50, 75]
UpperCAmelCase: Optional[int] = fuzz.membership.trimf(X, abca)
UpperCAmelCase: Any = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
UpperCAmelCase: List[Any] = np.ones(75)
UpperCAmelCase: Any = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCAmelCase: str = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
UpperCAmelCase: List[Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
UpperCAmelCase: int = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCAmelCase: int = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
UpperCAmelCase: List[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
UpperCAmelCase: int = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 336 | 1 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Optional[int] = FileLock(str(tmpdir / """foo.lock""" ) )
snake_case_ :Tuple = FileLock(str(tmpdir / """foo.lock""" ) )
snake_case_ :List[Any] = 0.01
with locka.acquire():
with pytest.raises(_lowercase ):
snake_case_ :Optional[Any] = time.time()
locka.acquire(_lowercase )
assert time.time() - _start > timeout
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :int = """a""" * 1000 + """.lock"""
snake_case_ :Tuple = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(_lowercase )
assert len(os.path.basename(locka._lock_file ) ) <= 255
snake_case_ :List[str] = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(_lowercase ):
locka.acquire(0 )
| 66 |
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''keras_nlp''']
def __init__( self :Tuple , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Dict:
requires_backends(self , ['''keras_nlp'''] )
| 9 | 0 |
"""simple docstring"""
def _lowerCAmelCase ( ) ->List[Any]:
for n in range(1, 1_0_0_0_0_0_0 ):
yield n * (n + 1) // 2
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any] ) ->str:
A__ : Union[str, Any] = 1
A__ : Tuple = 2
while i * i <= n:
A__ : List[Any] = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def _lowerCAmelCase ( ) ->Any:
return next(i for i in triangle_number_generator() if count_divisors(UpperCAmelCase__ ) > 5_0_0 )
if __name__ == "__main__":
print(solution())
| 296 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = 'Salesforce/blip-image-captioning-base'
snake_case_ = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
snake_case_ = 'image_captioner'
snake_case_ = AutoModelForVisionaSeq
snake_case_ = ['image']
snake_case_ = ['text']
def __init__( self : int , *snake_case : Optional[int] , **snake_case : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*snake_case , **snake_case )
def _UpperCamelCase ( self : int , snake_case : "Image" ):
'''simple docstring'''
return self.pre_processor(images=snake_case , return_tensors="""pt""" )
def _UpperCamelCase ( self : int , snake_case : List[Any] ):
'''simple docstring'''
return self.model.generate(**snake_case )
def _UpperCamelCase ( self : Optional[int] , snake_case : Any ):
'''simple docstring'''
return self.pre_processor.batch_decode(snake_case , skip_special_tokens=snake_case )[0].strip()
| 296 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ :List[str] = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :int = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
A_ :Union[str, Any] = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
A_ :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 71 |
import random
from .binary_exp_mod import bin_exp_mod
def A ( a_ ,a_=1_000 ) -> Optional[Any]:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
__UpperCamelCase : List[Any] =n - 1
__UpperCamelCase : Dict =0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
__UpperCamelCase : Optional[Any] =0
while count < prec:
__UpperCamelCase : Dict =random.randint(2 ,n - 1 )
__UpperCamelCase : Optional[Any] =bin_exp_mod(a_ ,a_ ,a_ )
if b != 1:
__UpperCamelCase : List[str] =True
for _ in range(a_ ):
if b == n - 1:
__UpperCamelCase : Tuple =False
break
__UpperCamelCase : Dict =b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
A_ :str = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 71 | 1 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
SCREAMING_SNAKE_CASE__:Tuple = logging.get_logger(__name__)
def _lowerCamelCase( a , a ):
def run_func(a ):
@wraps(_UpperCamelCase )
def run_in_eager_mode(*a , **a ):
return func(*_UpperCamelCase , **_UpperCamelCase )
@wraps(_UpperCamelCase )
@tf.function(experimental_compile=_UpperCamelCase )
def run_in_graph_mode(*a , **a ):
return func(*_UpperCamelCase , **_UpperCamelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def _lowerCamelCase( a , a , a ):
__a = random.Random()
__a = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(_UpperCamelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class snake_case__ ( _UpperCamelCase ):
_snake_case : TensorFlowBenchmarkArguments
_snake_case : PretrainedConfig
_snake_case : str = "TensorFlow"
@property
def a__ ( self ):
return tf.__version__
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# initialize GPU on separate process
__a = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__a = self._prepare_inference_func(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._measure_speed(_inference )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__a = self._prepare_train_func(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._measure_speed(_train )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _SCREAMING_SNAKE_CASE )
__a = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__a = self._prepare_inference_func(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._measure_memory(_inference )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _SCREAMING_SNAKE_CASE )
__a = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__a = self._prepare_train_func(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._measure_memory(_train )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
__a = (
hasattr(_SCREAMING_SNAKE_CASE , "architectures" )
and isinstance(config.architectures , _SCREAMING_SNAKE_CASE )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__a = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
__a = __import__("transformers" , fromlist=[model_class] )
__a = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a = model_cls(_SCREAMING_SNAKE_CASE )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
__a = TF_MODEL_MAPPING[config.__class__](_SCREAMING_SNAKE_CASE )
# encoder-decoder has vocab size saved differently
__a = config.vocab_size if hasattr(_SCREAMING_SNAKE_CASE , "vocab_size" ) else config.encoder.vocab_size
__a = random_input_ids(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
__a = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
__a = (
hasattr(_SCREAMING_SNAKE_CASE , "architectures" )
and isinstance(config.architectures , _SCREAMING_SNAKE_CASE )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__a = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
__a = __import__("transformers" , fromlist=[model_class] )
__a = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a = model_cls(_SCREAMING_SNAKE_CASE )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
__a = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_SCREAMING_SNAKE_CASE )
# encoder-decoder has vocab size saved differently
__a = config.vocab_size if hasattr(_SCREAMING_SNAKE_CASE , "vocab_size" ) else config.encoder.vocab_size
__a = random_input_ids(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
__a = model(_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )[0]
__a = tf.gradients(_SCREAMING_SNAKE_CASE , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
__a = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )[0]
__a = tf.gradients(_SCREAMING_SNAKE_CASE , model.trainable_variables )
return gradients
__a = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def a__ ( self , lowerCamelCase ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(_SCREAMING_SNAKE_CASE , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__a = timeit.repeat(
_SCREAMING_SNAKE_CASE , repeat=self.args.repeat , number=10 , )
return min(_SCREAMING_SNAKE_CASE ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn\'t fit on GPU. {e}" )
def a__ ( self , lowerCamelCase ):
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
__a = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won\'t log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
__a = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
__a = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
__a = nvml.nvmlDeviceGetMemoryInfo(_SCREAMING_SNAKE_CASE )
__a = meminfo.used
__a = Memory(_SCREAMING_SNAKE_CASE )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
__a = None
else:
__a = measure_peak_memory_cpu(_SCREAMING_SNAKE_CASE )
__a = Memory(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else memory_bytes
if self.args.trace_memory_line_by_line:
__a = stop_memory_tracing(_SCREAMING_SNAKE_CASE )
if memory is None:
__a = summary.total
else:
__a = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn\'t fit on GPU. {e}" )
return "N/A", None
| 368 | """simple docstring"""
SCREAMING_SNAKE_CASE__:Any = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
SCREAMING_SNAKE_CASE__:Any = {value: key for key, value in MORSE_CODE_DICT.items()}
def _lowerCamelCase( a ):
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def _lowerCamelCase( a ):
return "".join(REVERSE_DICT[char] for char in message.split() )
def _lowerCamelCase( ):
__a = "Morse code here!"
print(a )
__a = encrypt(a )
print(a )
__a = decrypt(a )
print(a )
if __name__ == "__main__":
main()
| 268 | 0 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1024 , lowerCAmelCase_=1024 , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> List[Any]:
_a : str = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
_a : List[Any] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='train' , **lowerCAmelCase_ )
_a : List[str] = tok.pad_token_id
def get_lens(lowerCAmelCase_ ):
_a : Dict = tqdm(
DataLoader(lowerCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=lowerCAmelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_a : Union[str, Any] = []
for batch in dl:
_a : Optional[Any] = batch['input_ids'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
_a : Optional[Any] = batch['labels'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
max_lens.append(max(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
max_lens.extend(lowerCAmelCase_ )
return max_lens
_a : str = get_lens(lowerCAmelCase_ )
_a : Optional[int] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='val' , **lowerCAmelCase_ )
_a : Dict = get_lens(lowerCAmelCase_ )
pickle_save(lowerCAmelCase_ , train_ds.len_file )
pickle_save(lowerCAmelCase_ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
_lowerCAmelCase : Union[str, Any] = TypeVar("T")
_lowerCAmelCase : Tuple = TypeVar("U")
class UpperCAmelCase_ ( Generic[T, U] ):
def __init__( self : Tuple , A : T | None , A : U | None ):
_UpperCAmelCase : List[Any] = key
_UpperCAmelCase : Any = val
_UpperCAmelCase : DoubleLinkedListNode[T, U] | None = None
_UpperCAmelCase : DoubleLinkedListNode[T, U] | None = None
def __repr__( self : Any ):
return (
f'Node: key: {self.key}, val: {self.val}, '
f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class UpperCAmelCase_ ( Generic[T, U] ):
def __init__( self : Union[str, Any] ):
_UpperCAmelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(A , A )
_UpperCAmelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(A , A )
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.rear, self.head
def __repr__( self : int ):
_UpperCAmelCase : List[Any] = ["DoubleLinkedList"]
_UpperCAmelCase : Optional[Any] = self.head
while node.next is not None:
rep.append(str(A ) )
_UpperCAmelCase : Optional[Any] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(A )
def snake_case_ ( self : Optional[int] , A : DoubleLinkedListNode[T, U] ):
_UpperCAmelCase : Tuple = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_UpperCAmelCase : int = node
_UpperCAmelCase : int = previous
_UpperCAmelCase : Optional[Any] = node
_UpperCAmelCase : Any = self.rear
def snake_case_ ( self : List[Any] , A : DoubleLinkedListNode[T, U] ):
if node.prev is None or node.next is None:
return None
_UpperCAmelCase : Optional[Any] = node.next
_UpperCAmelCase : List[str] = node.prev
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Union[str, Any] = None
return node
class UpperCAmelCase_ ( Generic[T, U] ):
__SCREAMING_SNAKE_CASE : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self : List[str] , A : int ):
_UpperCAmelCase : DoubleLinkedList[T, U] = DoubleLinkedList()
_UpperCAmelCase : Optional[int] = capacity
_UpperCAmelCase : Any = 0
_UpperCAmelCase : Optional[Any] = 0
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : Any ):
return (
f'CacheInfo(hits={self.hits}, misses={self.miss}, '
f'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self : Union[str, Any] , A : T ):
return key in self.cache
def snake_case_ ( self : Dict , A : T ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_UpperCAmelCase : DoubleLinkedListNode[T, U] = self.cache[key]
_UpperCAmelCase : Dict = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(A )
return node.val
self.miss += 1
return None
def snake_case_ ( self : Any , A : T , A : U ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_UpperCAmelCase : Any = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(A ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
_UpperCAmelCase : Any = DoubleLinkedListNode(A , A )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_UpperCAmelCase : Union[str, Any] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_UpperCAmelCase : List[str] = value
self.list.add(A )
@classmethod
def snake_case_ ( cls : int , A : int = 1_2_8 ):
def cache_decorator_inner(A : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*A : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
_UpperCAmelCase : Optional[int] = LRUCache(A )
_UpperCAmelCase : List[str] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_UpperCAmelCase : Union[str, Any] = func(*A )
cls.decorator_function_to_instance_map[func].put(args[0] , A )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(A , "cache_info" , A ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 202 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __snake_case ( SCREAMING_SNAKE_CASE__ : list[Any] ) -> None:
'''simple docstring'''
create_state_space_tree(SCREAMING_SNAKE_CASE__ , [] , 0 )
def __snake_case ( SCREAMING_SNAKE_CASE__ : list[Any] , SCREAMING_SNAKE_CASE__ : list[Any] , SCREAMING_SNAKE_CASE__ : int ) -> None:
'''simple docstring'''
if index == len(SCREAMING_SNAKE_CASE__ ):
print(SCREAMING_SNAKE_CASE__ )
return
create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
_lowerCAmelCase : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 202 | 1 |
"""simple docstring"""
A_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
_snake_case : Dict = [False] * len(snake_case__ )
_snake_case : str = [s]
_snake_case : Tuple = True
while queue:
_snake_case : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(snake_case__ )
_snake_case : int = True
_snake_case : str = u
return visited[t]
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
_snake_case : Dict = [-1] * (len(snake_case__ ))
_snake_case : str = 0
_snake_case : Union[str, Any] = []
_snake_case : Optional[Any] = [i[:] for i in graph] # Record original cut, copy.
while bfs(snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
_snake_case : List[str] = float("""Inf""" )
_snake_case : Optional[int] = sink
while s != source:
# Find the minimum value in select path
_snake_case : Any = min(snake_case__ , graph[parent[s]][s] )
_snake_case : Optional[Any] = parent[s]
max_flow += path_flow
_snake_case : int = sink
while v != source:
_snake_case : Union[str, Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_snake_case : Optional[Any] = parent[v]
for i in range(len(snake_case__ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 64 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class lowercase:
'''simple docstring'''
def __init__( self: List[Any], a_: List[str] ):
'''simple docstring'''
_snake_case : int = data
_snake_case : Dict = [0X67452301, 0Xefcdab89, 0X98badcfe, 0X10325476, 0Xc3d2e1f0]
@staticmethod
def UpperCamelCase_ ( a_: Optional[Any], a_: Dict ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0Xffffffff
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
_snake_case : Optional[int] = self.data + padding + struct.pack(""">Q""", 8 * len(self.data ) )
return padded_data
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0, len(self.padded_data ), 64 )
]
def UpperCamelCase_ ( self: Optional[Any], a_: List[Any] ):
'''simple docstring'''
_snake_case : List[str] = list(struct.unpack(""">16L""", a_ ) ) + [0] * 64
for i in range(16, 80 ):
_snake_case : List[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1 )
return w
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.padding()
_snake_case : str = self.split_blocks()
for block in self.blocks:
_snake_case : Any = self.expand_block(a_ )
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = self.h
for i in range(0, 80 ):
if 0 <= i < 20:
_snake_case : int = (b & c) | ((~b) & d)
_snake_case : str = 0X5a827999
elif 20 <= i < 40:
_snake_case : Optional[int] = b ^ c ^ d
_snake_case : str = 0X6ed9eba1
elif 40 <= i < 60:
_snake_case : List[Any] = (b & c) | (b & d) | (c & d)
_snake_case : List[Any] = 0X8f1bbcdc
elif 60 <= i < 80:
_snake_case : List[Any] = b ^ c ^ d
_snake_case : int = 0Xca62c1d6
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = (
self.rotate(a_, 5 ) + f + e + k + expanded_block[i] & 0Xffffffff,
a,
self.rotate(a_, 30 ),
c,
d,
)
_snake_case : Union[str, Any] = (
self.h[0] + a & 0Xffffffff,
self.h[1] + b & 0Xffffffff,
self.h[2] + c & 0Xffffffff,
self.h[3] + d & 0Xffffffff,
self.h[4] + e & 0Xffffffff,
)
return ("{:08x}" * 5).format(*self.h )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = B"""Test String"""
assert SHAaHash(snake_case__ ).final_hash() == hashlib.shaa(snake_case__ ).hexdigest() # noqa: S324
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
_snake_case : Union[str, Any] = parser.parse_args()
_snake_case : List[Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
_snake_case : str = f.read()
else:
_snake_case : int = bytes(snake_case__ , """utf-8""" )
print(SHAaHash(snake_case__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 64 | 1 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{\"default\": {\"dataset_size\": 42}}' )
snake_case_ = DatasetInfosDict.from_directory(_a )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = str(_a )
dataset_info.write_to_directory(_a )
snake_case_ = DatasetInfo.from_directory(_a )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(_a , 'dataset_info.json' ) )
def __lowerCamelCase ( ):
'''simple docstring'''
snake_case_ = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
snake_case_ = dataset_info._to_yaml_dict()
assert sorted(_a ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
snake_case_ = yaml.safe_dump(_a )
snake_case_ = yaml.safe_load(_a )
assert dataset_info_yaml_dict == reloaded
def __lowerCamelCase ( ):
'''simple docstring'''
snake_case_ = DatasetInfo()
snake_case_ = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = str(_a )
dataset_infos_dict.write_to_directory(_a )
snake_case_ = DatasetInfosDict.from_directory(_a )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
snake_case_ = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(_a , 'README.md' ) )
| 361 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowercase :
def __init__( self , snake_case , snake_case=99 , snake_case=13 , snake_case=7 , snake_case=9 , snake_case=True , snake_case=True , snake_case=False , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case=8 , snake_case=0.1 , snake_case=0.0_02 , snake_case=1 , snake_case=0 , snake_case=0 , snake_case=None , snake_case=None , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = encoder_seq_length
snake_case_ = decoder_seq_length
# For common tests
snake_case_ = self.decoder_seq_length
snake_case_ = is_training
snake_case_ = use_attention_mask
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = d_ff
snake_case_ = relative_attention_num_buckets
snake_case_ = dropout_rate
snake_case_ = initializer_factor
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = decoder_start_token_id
snake_case_ = None
snake_case_ = decoder_layers
def a ( self ):
return TaConfig.from_pretrained('google/umt5-base' )
def a ( self , snake_case , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , ):
if attention_mask is None:
snake_case_ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case_ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case_ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=snake_case )
if decoder_head_mask is None:
snake_case_ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=snake_case )
if cross_attn_head_mask is None:
snake_case_ = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=snake_case )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def a ( self ):
snake_case_ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
snake_case_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case_ = input_ids.clamp(self.pad_token_id + 1 )
snake_case_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case_ = self.get_config()
snake_case_ = config.num_attention_heads
snake_case_ = self.prepare_inputs_dict(snake_case , snake_case , snake_case )
return config, input_dict
def a ( self ):
snake_case_ , snake_case_ = self.prepare_config_and_inputs()
return config, inputs_dict
def a ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = UMTaModel(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ = model(
input_ids=snake_case , decoder_input_ids=snake_case , attention_mask=snake_case , decoder_attention_mask=snake_case , )
snake_case_ = model(input_ids=snake_case , decoder_input_ids=snake_case )
snake_case_ = result.last_hidden_state
snake_case_ = result.past_key_values
snake_case_ = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(snake_case ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
snake_case_ = UMTaModel(config=snake_case ).get_decoder().to(snake_case ).eval()
# first forward pass
snake_case_ = model(snake_case , use_cache=snake_case )
snake_case_ = model(snake_case )
snake_case_ = model(snake_case , use_cache=snake_case )
self.parent.assertTrue(len(snake_case ) == len(snake_case ) )
self.parent.assertTrue(len(snake_case ) == len(snake_case ) + 1 )
snake_case_ , snake_case_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = model(snake_case )['last_hidden_state']
snake_case_ = model(snake_case , past_key_values=snake_case )['last_hidden_state']
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def a ( self , snake_case , snake_case , ):
snake_case_ = UMTaModel(config=snake_case ).to(snake_case ).half().eval()
snake_case_ = model(**snake_case )['last_hidden_state']
self.parent.assertFalse(torch.isnan(snake_case ).any().item() )
@require_torch
class lowercase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Optional[int] = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : Optional[int] = True
__SCREAMING_SNAKE_CASE : Any = True
# The small UMT5 model needs higher percentages for CPU/MP tests
__SCREAMING_SNAKE_CASE : List[str] = [0.8, 0.9]
def a ( self ):
snake_case_ = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
snake_case_ = UMTaModel(config_and_inputs[0] ).to(snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
snake_case , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=snake_case , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*snake_case )
def a ( self ):
snake_case_ = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
snake_case_ = self.model_tester.prepare_config_and_inputs()
snake_case_ = config_and_inputs[0]
snake_case_ = UMTaForConditionalGeneration(snake_case ).eval()
model.to(snake_case )
snake_case_ = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=snake_case ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case ),
}
for attn_name, (name, mask) in zip(snake_case , head_masking.items() ):
snake_case_ = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
snake_case_ = torch.ones(
config.num_decoder_layers , config.num_heads , device=snake_case )
snake_case_ = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=snake_case , return_dict_in_generate=snake_case , **snake_case , )
# We check the state of decoder_attentions and cross_attentions just from the last step
snake_case_ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def a ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def a ( self ):
snake_case_ = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=snake_case ).to(snake_case )
snake_case_ = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=snake_case , legacy=snake_case )
snake_case_ = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
snake_case_ = tokenizer(snake_case , return_tensors='pt' , padding=snake_case ).input_ids
# fmt: off
snake_case_ = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(snake_case , snake_case )
snake_case_ = model.generate(input_ids.to(snake_case ) )
snake_case_ = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
snake_case_ = tokenizer.batch_decode(snake_case )
self.assertEqual(snake_case , snake_case )
| 200 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class a__ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase__ : Optional[NestedDataStructureLike[PathLike]] = None , UpperCAmelCase__ : Optional[NamedSplit] = None , UpperCAmelCase__ : Optional[Features] = None , UpperCAmelCase__ : str = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , **UpperCAmelCase__ : Optional[Any] , ) ->Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths
SCREAMING_SNAKE_CASE : Any = split if split or isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else """train"""
SCREAMING_SNAKE_CASE : Optional[Any] = features
SCREAMING_SNAKE_CASE : Optional[Any] = cache_dir
SCREAMING_SNAKE_CASE : List[Any] = keep_in_memory
SCREAMING_SNAKE_CASE : Optional[int] = streaming
SCREAMING_SNAKE_CASE : str = num_proc
SCREAMING_SNAKE_CASE : Any = kwargs
@abstractmethod
def _lowercase ( self : Optional[int] ) ->Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
"""simple docstring"""
pass
class a__ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase__ : Optional[Features] = None , UpperCAmelCase__ : str = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , **UpperCAmelCase__ : List[str] , ) ->Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = features
SCREAMING_SNAKE_CASE : Union[str, Any] = cache_dir
SCREAMING_SNAKE_CASE : Optional[Any] = keep_in_memory
SCREAMING_SNAKE_CASE : str = streaming
SCREAMING_SNAKE_CASE : List[Any] = num_proc
SCREAMING_SNAKE_CASE : Tuple = kwargs
@abstractmethod
def _lowercase ( self : Any ) ->Union[Dataset, IterableDataset]:
"""simple docstring"""
pass
| 245 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : int ="""openai/whisper-base"""
UpperCAmelCase__ : Dict =(
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
UpperCAmelCase__ : List[str] ="""transcriber"""
UpperCAmelCase__ : Union[str, Any] =WhisperProcessor
UpperCAmelCase__ : Union[str, Any] =WhisperForConditionalGeneration
UpperCAmelCase__ : Tuple =["""audio"""]
UpperCAmelCase__ : List[Any] =["""text"""]
def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[str] ) ->Union[str, Any]:
"""simple docstring"""
return self.pre_processor(UpperCAmelCase__ , return_tensors="""pt""" ).input_features
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[int] ) ->Dict:
"""simple docstring"""
return self.model.generate(inputs=UpperCAmelCase__ )
def _lowercase ( self : Dict , UpperCAmelCase__ : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
return self.pre_processor.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )[0]
| 245 | 1 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : int ):
debug_launcher(test_script.main )
def lowerCamelCase ( self : int ):
debug_launcher(test_ops.main )
| 360 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __lowerCamelCase :
'''simple docstring'''
def __init__( self : List[str] , a_ : Any , a_ : Any=None , a_ : int=None , a_ : str=None , a_ : Optional[int]="resnet50" , a_ : str=3 , a_ : str=32 , a_ : Union[str, Any]=3 , a_ : Tuple=True , a_ : List[str]=True , ):
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : Dict = out_indices if out_indices is not None else [4]
lowerCAmelCase_ : int = stage_names
lowerCAmelCase_ : Optional[Any] = out_features
lowerCAmelCase_ : Tuple = backbone
lowerCAmelCase_ : List[str] = batch_size
lowerCAmelCase_ : Tuple = image_size
lowerCAmelCase_ : List[Any] = num_channels
lowerCAmelCase_ : Optional[int] = use_pretrained_backbone
lowerCAmelCase_ : List[Any] = is_training
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[int] = self.get_config()
return config, pixel_values
def lowerCamelCase ( self : Dict ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowerCamelCase ( self : Union[str, Any] , a_ : str , a_ : Optional[int] ):
lowerCAmelCase_ : Union[str, Any] = TimmBackbone(config=a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ : int = model(a_ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = config_and_inputs
lowerCAmelCase_ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __lowerCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
a_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
a_ : int = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
a_ : Union[str, Any] = False
a_ : str = False
a_ : List[Any] = False
a_ : Dict = False
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Union[str, Any] = TimmBackboneModelTester(self )
lowerCAmelCase_ : List[str] = ConfigTester(self , config_class=a_ , has_text_modality=a_ )
def lowerCamelCase ( self : Dict ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : Optional[int] = "resnet18"
lowerCAmelCase_ : List[Any] = "microsoft/resnet-18"
lowerCAmelCase_ : Tuple = AutoBackbone.from_pretrained(a_ , use_timm_backbone=a_ )
lowerCAmelCase_ : str = AutoBackbone.from_pretrained(a_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase_ : Dict = AutoBackbone.from_pretrained(a_ , use_timm_backbone=a_ , out_indices=[1, 2, 3] )
lowerCAmelCase_ : Any = AutoBackbone.from_pretrained(a_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def lowerCamelCase ( self : Optional[int] ):
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def lowerCamelCase ( self : Dict ):
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def lowerCamelCase ( self : List[Any] ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowerCamelCase ( self : Dict ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowerCamelCase ( self : Any ):
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def lowerCamelCase ( self : Tuple ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCamelCase ( self : str ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowerCamelCase ( self : Any ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowerCamelCase ( self : List[str] ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCamelCase ( self : Tuple ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCamelCase ( self : Optional[int] ):
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def lowerCamelCase ( self : Dict ):
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def lowerCamelCase ( self : int ):
pass
@unittest.skip("Safetensors is not supported by timm." )
def lowerCamelCase ( self : Union[str, Any] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase ( self : Union[str, Any] ):
pass
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : str = model_class(a_ )
lowerCAmelCase_ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : str = [*signature.parameters.keys()]
lowerCAmelCase_ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : List[Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase_ : int = self.all_model_classes[0]
lowerCAmelCase_ : Optional[int] = model_class(a_ )
model.to(a_ )
lowerCAmelCase_ : Union[str, Any] = self._prepare_for_class(a_ , a_ )
lowerCAmelCase_ : str = model(**a_ )
lowerCAmelCase_ : Any = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase_ : Optional[int] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCAmelCase_ : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=a_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCamelCase ( self : str ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Dict = model_class(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : Tuple = model(**a_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase_ : Optional[int] = copy.deepcopy(a_ )
lowerCAmelCase_ : Tuple = None
lowerCAmelCase_ : Any = model_class(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : int = model(**a_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase_ : str = copy.deepcopy(a_ )
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : Optional[int] = model_class(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(**a_ )
| 161 | 0 |
from math import pi, sqrt
def __snake_case ( _UpperCAmelCase ):
if num <= 0:
raise ValueError('''math domain error''' )
if num > 1_71.5:
raise OverflowError('''math range error''' )
elif num - int(_UpperCAmelCase ) not in (0, 0.5):
raise NotImplementedError('''num must be an integer or a half-integer''' )
elif num == 0.5:
return sqrt(_UpperCAmelCase )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def __snake_case ( ):
assert gamma(0.5 ) == sqrt(_UpperCAmelCase )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
__snake_case :Union[str, Any] = 1.0
while num:
__snake_case :Union[str, Any] = float(input('''Gamma of: '''))
print(f'gamma({num}) = {gamma(num)}')
print('''\nEnter 0 to exit...''')
| 49 |
def a_ ( _A = 1000 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , _A ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 307 | 0 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def a__ ( snake_case__ = True , *snake_case__ , **snake_case__ ) -> str:
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
lowerCamelCase = False
if main_process_only:
lowerCamelCase = PartialState().local_process_index == 0
return _tqdm(*A__ , **A__ , disable=A__ )
| 351 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : List[str] = logging.getLogger(__name__)
@dataclass(frozen=UpperCAmelCase__ )
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
@dataclass(frozen=UpperCAmelCase__ )
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = 42
def __init__( self , _a , _a , _a , _a = None , _a=False , _a = False , ):
"""simple docstring"""
lowerCamelCase = hans_processors[task]()
lowerCamelCase = os.path.join(
_a , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(_a ) , _a , ) , )
lowerCamelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase , lowerCamelCase = label_list[2], label_list[1]
lowerCamelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase = cached_features_file + """.lock"""
with FileLock(_a ):
if os.path.exists(_a ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
lowerCamelCase = torch.load(_a )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
lowerCamelCase = (
processor.get_dev_examples(_a ) if evaluate else processor.get_train_examples(_a )
)
logger.info("""Training examples: %s""" , len(_a ) )
lowerCamelCase = hans_convert_examples_to_features(_a , _a , _a , _a )
logger.info("""Saving features into cached file %s""" , _a )
torch.save(self.features , _a )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _a ):
"""simple docstring"""
return self.features[i]
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = 42
def __init__( self , _a , _a , _a , _a = 128 , _a=False , _a = False , ):
"""simple docstring"""
lowerCamelCase = hans_processors[task]()
lowerCamelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase , lowerCamelCase = label_list[2], label_list[1]
lowerCamelCase = label_list
lowerCamelCase = processor.get_dev_examples(_a ) if evaluate else processor.get_train_examples(_a )
lowerCamelCase = hans_convert_examples_to_features(_a , _a , _a , _a )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 10_000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(_a )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowerCamelCase = tf.data.Dataset.from_generator(
_a , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.dataset
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _a ):
"""simple docstring"""
return self.features[i]
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.label_list
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_a , """heuristics_train_set.txt""" ) ) , """train""" )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_a , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
lowerCamelCase = []
for i, line in enumerate(_a ):
if i == 0:
continue
lowerCamelCase = """%s-%s""" % (set_type, line[0])
lowerCamelCase = line[5]
lowerCamelCase = line[6]
lowerCamelCase = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
lowerCamelCase = line[0]
examples.append(InputExample(guid=_a , text_a=_a , text_b=_a , label=_a , pairID=_a ) )
return examples
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> Tuple:
lowerCamelCase = {label: i for i, label in enumerate(snake_case__ )}
lowerCamelCase = []
for ex_index, example in tqdm.tqdm(enumerate(snake_case__ ) , desc="""convert examples to features""" ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d""" % (ex_index) )
lowerCamelCase = tokenizer(
example.text_a , example.text_b , add_special_tokens=snake_case__ , max_length=snake_case__ , padding="""max_length""" , truncation=snake_case__ , return_overflowing_tokens=snake_case__ , )
lowerCamelCase = label_map[example.label] if example.label in label_map else 0
lowerCamelCase = int(example.pairID )
features.append(InputFeatures(**snake_case__ , label=snake_case__ , pairID=snake_case__ ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
lowerCAmelCase : List[str] = {
"""hans""": 3,
}
lowerCAmelCase : str = {
"""hans""": HansProcessor,
}
| 168 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.