code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 251 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40_478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
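# Hedged usage sketch (added, not from the original sample): attribute_map
# lets generic config names resolve to the GPT-1 names, e.g. with the
# defaults above, OpenAIGPTConfig().hidden_size == OpenAIGPTConfig().n_embd == 768.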
| 251 | 1 |
"""simple docstring"""
from random import randint, random
def construct_highway(
    number_of_cells,
    frequency,
    initial_speed,
    random_frequency=False,
    random_speed=False,
    max_speed=5,
):
    '''simple docstring'''
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now, car_index):
    '''simple docstring'''
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now, probability, max_speed):
    '''simple docstring'''
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway, number_of_update, probability, max_speed):
    '''simple docstring'''
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
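    # Hedged usage sketch (added, not from the original sample): build a
    # 100-cell ring road with a car every 20 cells starting at speed 1, then
    # run 5 Nagel-Schreckenberg updates with braking probability 0.3.
    demo_highway = construct_highway(100, 20, 1)
    demo_states = simulate(demo_highway, 5, 0.3, 5)
    print(len(demo_states))  # 6 states: the initial row plus one per update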
| 205 |
"""simple docstring"""
def one_pence():
    '''simple docstring'''
    return 1


def two_pence(x):
    '''simple docstring'''
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x):
    '''simple docstring'''
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x):
    '''simple docstring'''
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x):
    '''simple docstring'''
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x):
    '''simple docstring'''
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x):
    '''simple docstring'''
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x):
    '''simple docstring'''
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x=200):
    '''simple docstring'''
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
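# Hedged note (added, not from the original sample): solution(n) counts the
# ways to make n pence from the UK coin set {1, 2, 5, 10, 20, 50, 100, 200}
# (Project Euler 31). Tiny check: 5p can be formed 4 ways
# (5; 2+2+1; 2+1+1+1; 1+1+1+1+1), so solution(5) == 4.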
| 205 | 1 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    '''simple docstring'''

    def setUp(self):
        '''simple docstring'''
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            # best-effort restoration of the two env assignments in the sample
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        '''simple docstring'''
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype  # best-effort env var restoration
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    '''simple docstring'''

    def setUp(self):
        '''simple docstring'''
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        '''simple docstring'''
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break
            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")
            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")
            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        '''simple docstring'''
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        '''simple docstring'''
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])
            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break
                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")
                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break
                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
| 14 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub) | 286 | 0 |
from __future__ import annotations
def all_unique(input_list: list) -> bool:
    return len(set(input_list)) == len(input_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
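# Hedged usage examples (added, not from the original sample):
# all_unique([1, 2, 3])        -> True
# all_unique(["a", "b", "a"])  -> False  (duplicate "a")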
| 370 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
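# Hedged summary (added, not from the original sample): in the distributed
# path, every worker sends its question embeddings to rank 0 (gather), rank 0
# queries the index once for the whole batch, and the per-worker slices of doc
# ids and doc embeddings are sent back via two scatter calls over the gloo group.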
| 338 | 0 |
"""simple docstring"""
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
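# Hedged usage examples (added, not from the original sample):
# hex_to_bin("AC")   -> 10101100        (0xAC == 172 == 0b10101100)
# hex_to_bin("-afd") -> -101011111101   (the sign is carried through)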
| 213 | """simple docstring"""
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 213 | 1 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
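# Hedged sanity check (added, not from the original sample): the entropy of a
# uniform attention row over n positions is log(n).
# import math
# uniform = torch.full((4,), 0.25)
# assert torch.isclose(entropy(uniform), torch.tensor(math.log(4)))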
def print_2d_tensor(tensor):
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )
    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models")
    parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.")
    # Other parameters
    parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path")
    parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path")
    parser.add_argument("--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument("--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory")
    parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
    parser.add_argument("--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers")
    parser.add_argument("--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1")
    parser.add_argument("--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy.")
    parser.add_argument("--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).")
    parser.add_argument("--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 356 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs)
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument("--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument("--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all.")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 301 | 0 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    '''simple docstring'''
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
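# Hedged note (added, not from the original sample): solution(10000) sums all
# amicable numbers below 10000 (Project Euler 21). Example pair: the proper
# divisors of 220 sum to 284 and those of 284 sum to 220, so both are counted.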
| 107 | from torch import nn
class ClassificationHead(nn.Module):
    def __init__(self, class_size, embed_size):
        """simple docstring"""
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        """simple docstring"""
        logits = self.mlp(hidden_state)
        return logits
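# Hedged usage sketch (added; shapes are illustrative):
# import torch
# head = ClassificationHead(class_size=5, embed_size=768)
# logits = head(torch.randn(2, 768))  # -> tensor of shape (2, 5)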
| 175 | 0 |
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    '''simple docstring'''
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
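# Hedged worked example (added, not from the original sample): for the 7B
# model, dim = 4096, so int(8 * 4096 / 3) = 10922, and rounding up to a
# multiple of 256 gives compute_intermediate_size(4096) == 11008, which
# matches INTERMEDIATE_SIZE_MAP["7B"] above.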
def read_json(path):
    '''simple docstring'''
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    '''simple docstring'''
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    '''simple docstring'''
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
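    # Hedged note (added, not from the original sample): `permute` converts the
    # original interleaved rotary layout (even/odd channel pairs per head) into
    # the half-split layout used by the Hugging Face LLaMA implementation, e.g.
    # for one head of size 4, channel order (0, 1, 2, 3) becomes (0, 2, 1, 3).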
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wq.weight'''] ),
F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wk.weight'''] ),
F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''],
F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''],
F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''],
F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''],
F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''],
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''],
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.attention_norm.weight'''
].clone(),
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
for k, v in state_dict.items():
UpperCamelCase__ : List[Any] =filename
param_count += v.numel()
torch.save(UpperCAmelCase , os.path.join(UpperCAmelCase , UpperCAmelCase ) )
UpperCamelCase__ : Tuple =F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
UpperCamelCase__ : str ={
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
UpperCamelCase__ : str ={
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(UpperCAmelCase )] , dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(UpperCAmelCase )] , dim=0 ),
}
for k, v in state_dict.items():
UpperCamelCase__ : Tuple =filename
param_count += v.numel()
torch.save(UpperCAmelCase , os.path.join(UpperCAmelCase , UpperCAmelCase ) )
# Write configs
UpperCamelCase__ : Tuple ={'''total_size''': param_count * 2}
write_json(UpperCAmelCase , os.path.join(UpperCAmelCase , '''pytorch_model.bin.index.json''' ) )
UpperCamelCase__ : List[Any] =params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
UpperCamelCase__ : Any =params['''multiple_of'''] if '''multiple_of''' in params else 256
UpperCamelCase__ : List[str] =LlamaConfig(
hidden_size=UpperCAmelCase , intermediate_size=compute_intermediate_size(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=UpperCAmelCase , )
config.save_pretrained(UpperCAmelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
UpperCamelCase__ : Dict =LlamaForCausalLM.from_pretrained(UpperCAmelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=UpperCAmelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
model.save_pretrained(UpperCAmelCase , safe_serialization=UpperCAmelCase )
shutil.rmtree(UpperCAmelCase )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
'''simple docstring'''
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--input_dir''' , help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' , )
parser.add_argument(
'''--model_size''' , choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] , )
parser.add_argument(
'''--output_dir''' , help='''Location to write HF model and tokenizer''' , )
parser.add_argument('''--safe_serialization''' , type=UpperCAmelCase , help='''Whether or not to save using `safetensors`.''' )
    args = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 157 |
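For reference, the conversion script above calls `compute_intermediate_size` (defined earlier in the file, outside this excerpt) to derive the MLP width from the hidden size. A minimal sketch consistent with LLaMA's SwiGLU sizing is shown below; treat the exact rounding as an assumption if your checkpoint's `params.json` differs.

def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    # LLaMA's SwiGLU MLP is roughly 8/3 * hidden_size wide, scaled by
    # ffn_dim_multiplier and rounded up to a multiple of `multiple_of`.
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)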
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    """Configuration class to store the configuration of a UniSpeech model."""

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 157 | 1 |
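The `inputs_to_logits_ratio` property above multiplies the convolutional strides, which gives the total downsampling factor of the feature encoder. With the default strides this is 5 * 2**6 = 320, so 16 kHz waveforms are reduced to roughly 50 logit frames per second:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, conv_stride, 1)  # 320
print(16_000 / ratio)  # 50.0 frames per second of audio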
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    """
    Three-way partition of the data into less, equal and greater lists,
    relative to the pivot.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """
    Returns the index-th smallest element of `items` in expected linear time.
    """
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
| 93 |
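A quick usage check for the quick_select routine above (the index counts from zero, so the middle index of an odd-length list yields its median):

items = [7, 2, 9, 4, 1]
print(quick_select(items, 0))                # 1, the minimum
print(quick_select(items, len(items) // 2))  # 4, the median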
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
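Tracing the digit-reversal loop above for 121: rev_num grows 1 -> 12 -> 121 while num shrinks 12 -> 1 -> 0, so the saved copy and the reversal match:

assert is_palindrome(121)
assert not is_palindrome(123)   # reverses to 321
assert not is_palindrome(-121)  # negatives are rejected up front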
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowerCamelCase = 2
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
"""simple docstring"""
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
raise ValueError(F'''path {biogpt_checkpoint_path} does not exist!''' )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
print(F'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
UpperCAmelCase_ : Any = os.path.join(_SCREAMING_SNAKE_CASE , "checkpoint.pt" )
if not os.path.isfile(_SCREAMING_SNAKE_CASE ):
raise ValueError(F'''path to the file {checkpoint_file} does not exist!''' )
UpperCAmelCase_ : Tuple = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" )
UpperCAmelCase_ : List[str] = chkpt["cfg"]["model"]
# dicts
UpperCAmelCase_ : Dict = os.path.join(_SCREAMING_SNAKE_CASE , "dict.txt" )
if not os.path.isfile(_SCREAMING_SNAKE_CASE ):
raise ValueError(F'''path to the file {dict_file} does not exist!''' )
UpperCAmelCase_ : Any = Dictionary.load(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = rewrite_dict_keys(src_dict.indices )
UpperCAmelCase_ : List[Any] = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = os.path.join(_SCREAMING_SNAKE_CASE , VOCAB_FILES_NAMES["vocab_file"] )
print(F'''Generating {src_vocab_file} of {src_vocab_size} records''' )
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# merges_file (bpecodes)
UpperCAmelCase_ : Tuple = os.path.join(_SCREAMING_SNAKE_CASE , "bpecodes" )
if not os.path.isfile(_SCREAMING_SNAKE_CASE ):
raise ValueError(F'''path to the file {bpecodes_file} does not exist!''' )
UpperCAmelCase_ : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , VOCAB_FILES_NAMES["merges_file"] )
shutil.copyfile(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# model config
UpperCAmelCase_ : Any = os.path.join(_SCREAMING_SNAKE_CASE , "config.json" )
UpperCAmelCase_ : Tuple = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.02,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1E-12,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
# good hparam defaults to start with
print(F'''Generating {biogpt_model_config_file}''' )
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# tokenizer config
UpperCAmelCase_ : Any = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = {
"bos_token": "<s>",
"eos_token": "</s>",
"model_max_length": 10_24,
"pad_token": "<pad>",
"special_tokens_map_file": None,
"tokenizer_class": "BioGptTokenizer",
"unk_token": "<unk>",
}
print(F'''Generating {biogpt_tokenizer_config_file}''' )
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# model
UpperCAmelCase_ : Any = chkpt["model"]
# remove unneeded keys
UpperCAmelCase_ : Tuple = [
"decoder.version",
]
for k in ignore_keys:
model_state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("output_projection.weight" ):
UpperCAmelCase_ : Dict = model_state_dict.pop(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : int = model_state_dict.pop(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = BioGptConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = BioGptForCausalLM(_SCREAMING_SNAKE_CASE )
# check that it loads ok
model_new.load_state_dict(_SCREAMING_SNAKE_CASE )
# save
UpperCAmelCase_ : List[str] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(F'''Generating {pytorch_weights_dump_path}''' )
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print("Conversion is done!" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 67 |
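As a concrete illustration of what `rewrite_dict_keys` does: fairseq-style BPE vocabularies mark word continuations with a trailing `@@`, while the Hugging Face BioGPT tokenizer marks word endings with `</w>`. The four special tokens must be present, since the function restores them afterwards:

src = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7}
print(rewrite_dict_keys(src))
# {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'le': 5, 'er</w>': 7}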
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepares a list of PIL images, numpy arrays (numpify=True) or PyTorch tensors (torchify=True)."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case ,"do_resize" ) )
self.assertTrue(hasattr(_snake_case ,"size" ) )
self.assertTrue(hasattr(_snake_case ,"do_center_crop" ) )
self.assertTrue(hasattr(_snake_case ,"center_crop" ) )
self.assertTrue(hasattr(_snake_case ,"do_normalize" ) )
self.assertTrue(hasattr(_snake_case ,"image_mean" ) )
self.assertTrue(hasattr(_snake_case ,"image_std" ) )
self.assertTrue(hasattr(_snake_case ,"do_convert_rgb" ) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"height": 2_24, "width": 2_24} )
self.assertEqual(image_processor.crop_size ,{"height": 18, "width": 18} )
UpperCAmelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case ,Image.Image )
# Test not batched input
UpperCAmelCase_ : Optional[Any] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
UpperCAmelCase_ : int = image_processing(_snake_case ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase__ ( self ):
# Initialize image_processing
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : List[str] = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case ,numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case ,np.ndarray )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
UpperCAmelCase_ : Optional[int] = image_processing(_snake_case ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase__ ( self ):
# Initialize image_processing
UpperCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case ,torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case ,torch.Tensor )
# Test not batched input
UpperCAmelCase_ : str = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
UpperCAmelCase_ : List[str] = image_processing(_snake_case ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case ,"do_resize" ) )
self.assertTrue(hasattr(_snake_case ,"size" ) )
self.assertTrue(hasattr(_snake_case ,"do_center_crop" ) )
self.assertTrue(hasattr(_snake_case ,"center_crop" ) )
self.assertTrue(hasattr(_snake_case ,"do_normalize" ) )
self.assertTrue(hasattr(_snake_case ,"image_mean" ) )
self.assertTrue(hasattr(_snake_case ,"image_std" ) )
self.assertTrue(hasattr(_snake_case ,"do_convert_rgb" ) )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case ,Image.Image )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
UpperCAmelCase_ : Any = image_processing(_snake_case ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
| 67 | 1 |
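The `do_convert_rgb` flag exercised by the four-channel test class above flattens the alpha channel before the three-channel mean/std normalization is applied. Roughly, it amounts to a PIL conversion like this:

import numpy as np
from PIL import Image

rgba = Image.fromarray(np.zeros((18, 18, 4), dtype=np.uint8), mode="RGBA")
rgb = rgba.convert("RGB")              # 4 channels in, 3 channels out
print(rgb.size, len(rgb.getbands()))   # (18, 18) 3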
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """
    Returns the number of reduced proper fractions n/d with d <= limit,
    i.e. the sum of Euler's totient phi(d) for 2 <= d <= limit.
    """
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'''{solution() = }''')
| 105 |
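Small sanity check for the totient-sum solution above: for a limit of 8 there are 21 reduced proper fractions (1/2, 1/3, 2/3, 1/4, 3/4, ...), i.e. phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4:

assert solution(8) == 21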
from __future__ import annotations
def is_palindrome(n) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 82 | 0 |
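For example, 585 is counted by the solution above, since it is a palindrome in both bases (585 and 0b1001001001):

assert is_palindrome(585)
assert bin(585).split("b")[1] == "1001001001"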
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
'''simple docstring'''
def __init__( self : Optional[Any] , A_ : int , A_ : int , A_ : int , A_ : int=0.0 , A_ : Optional[int] = None , A_ : str = "geglu" , A_ : Optional[int] = None , A_ : bool = False , A_ : bool = False , A_ : bool = False , A_ : bool = False , A_ : bool = True , A_ : str = "layer_norm" , A_ : bool = False , ) -> Any:
"""simple docstring"""
super().__init__()
lowerCamelCase_ = only_cross_attention
lowerCamelCase_ = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
lowerCamelCase_ = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCamelCase_ = AdaLayerNorm(A_ , A_ )
elif self.use_ada_layer_norm_zero:
lowerCamelCase_ = AdaLayerNormZero(A_ , A_ )
else:
lowerCamelCase_ = nn.LayerNorm(A_ , elementwise_affine=A_ )
lowerCamelCase_ = Attention(
query_dim=A_ , heads=A_ , dim_head=A_ , dropout=A_ , bias=A_ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=A_ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
lowerCamelCase_ = (
AdaLayerNorm(A_ , A_ )
if self.use_ada_layer_norm
else nn.LayerNorm(A_ , elementwise_affine=A_ )
)
lowerCamelCase_ = Attention(
query_dim=A_ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=A_ , dim_head=A_ , dropout=A_ , bias=A_ , upcast_attention=A_ , ) # is self-attn if encoder_hidden_states is none
else:
lowerCamelCase_ = None
lowerCamelCase_ = None
# 3. Feed-forward
lowerCamelCase_ = nn.LayerNorm(A_ , elementwise_affine=A_ )
lowerCamelCase_ = FeedForward(A_ , dropout=A_ , activation_fn=A_ , final_dropout=A_ )
# let chunk size default to None
lowerCamelCase_ = None
lowerCamelCase_ = 0
def a__ ( self : Optional[Any] , A_ : Optional[int] , A_ : int ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = chunk_size
lowerCamelCase_ = dim
def a__ ( self : Dict , A_ : torch.FloatTensor , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[torch.LongTensor] = None , A_ : Dict[str, Any] = None , A_ : Optional[torch.LongTensor] = None , ) -> List[str]:
"""simple docstring"""
if self.use_ada_layer_norm:
lowerCamelCase_ = self.norma(A_ , A_ )
elif self.use_ada_layer_norm_zero:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self.norma(
A_ , A_ , A_ , hidden_dtype=hidden_states.dtype )
else:
lowerCamelCase_ = self.norma(A_ )
lowerCamelCase_ = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCamelCase_ = self.attna(
A_ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=A_ , **A_ , )
if self.use_ada_layer_norm_zero:
lowerCamelCase_ = gate_msa.unsqueeze(1 ) * attn_output
lowerCamelCase_ = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCamelCase_ = (
self.norma(A_ , A_ ) if self.use_ada_layer_norm else self.norma(A_ )
)
lowerCamelCase_ = self.attna(
A_ , encoder_hidden_states=A_ , attention_mask=A_ , **A_ , )
lowerCamelCase_ = attn_output + hidden_states
# 3. Feed-forward
lowerCamelCase_ = self.norma(A_ )
if self.use_ada_layer_norm_zero:
lowerCamelCase_ = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
lowerCamelCase_ = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCamelCase_ = torch.cat(
[self.ff(A_ ) for hid_slice in norm_hidden_states.chunk(A_ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
lowerCamelCase_ = self.ff(A_ )
if self.use_ada_layer_norm_zero:
lowerCamelCase_ = gate_mlp.unsqueeze(1 ) * ff_output
lowerCamelCase_ = ff_output + hidden_states
return hidden_states
class FeedForward(nn.Module):
'''simple docstring'''
def __init__( self : Tuple , A_ : int , A_ : Optional[int] = None , A_ : int = 4 , A_ : float = 0.0 , A_ : str = "geglu" , A_ : bool = False , ) -> List[str]:
"""simple docstring"""
super().__init__()
lowerCamelCase_ = int(dim * mult )
lowerCamelCase_ = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCamelCase_ = GELU(A_ , A_ )
if activation_fn == "gelu-approximate":
lowerCamelCase_ = GELU(A_ , A_ , approximate='tanh' )
elif activation_fn == "geglu":
lowerCamelCase_ = GEGLU(A_ , A_ )
elif activation_fn == "geglu-approximate":
lowerCamelCase_ = ApproximateGELU(A_ , A_ )
lowerCamelCase_ = nn.ModuleList([] )
# project in
self.net.append(A_ )
# project dropout
self.net.append(nn.Dropout(A_ ) )
# project out
self.net.append(nn.Linear(A_ , A_ ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
if final_dropout:
self.net.append(nn.Dropout(A_ ) )
def a__ ( self : int , A_ : Optional[Any] ) -> int:
"""simple docstring"""
for module in self.net:
lowerCamelCase_ = module(A_ )
return hidden_states
class GELU(nn.Module):
'''simple docstring'''
def __init__( self : Tuple , A_ : int , A_ : int , A_ : str = "none" ) -> Optional[int]:
"""simple docstring"""
super().__init__()
lowerCamelCase_ = nn.Linear(A_ , A_ )
lowerCamelCase_ = approximate
def a__ ( self : str , A_ : Tuple ) -> List[Any]:
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(A_ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def a__ ( self : Optional[int] , A_ : Optional[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.proj(A_ )
lowerCamelCase_ = self.gelu(A_ )
return hidden_states
class GEGLU(nn.Module):
'''simple docstring'''
def __init__( self : Tuple , A_ : int , A_ : int ) -> List[Any]:
"""simple docstring"""
super().__init__()
lowerCamelCase_ = nn.Linear(A_ , dim_out * 2 )
def a__ ( self : str , A_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(A_ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def a__ ( self : Any , A_ : Union[str, Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.proj(A_ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(A_ )
class ApproximateGELU(nn.Module):
'''simple docstring'''
def __init__( self : Optional[int] , A_ : int , A_ : int ) -> Dict:
"""simple docstring"""
super().__init__()
lowerCamelCase_ = nn.Linear(A_ , A_ )
def a__ ( self : List[str] , A_ : int ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.proj(A_ )
return x * torch.sigmoid(1.702 * x )
class AdaLayerNorm(nn.Module):
'''simple docstring'''
def __init__( self : str , A_ : Optional[int] , A_ : int ) -> Tuple:
"""simple docstring"""
super().__init__()
lowerCamelCase_ = nn.Embedding(A_ , A_ )
lowerCamelCase_ = nn.SiLU()
lowerCamelCase_ = nn.Linear(A_ , embedding_dim * 2 )
lowerCamelCase_ = nn.LayerNorm(A_ , elementwise_affine=A_ )
def a__ ( self : List[str] , A_ : List[Any] , A_ : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.linear(self.silu(self.emb(A_ ) ) )
lowerCamelCase_ , lowerCamelCase_ = torch.chunk(A_ , 2 )
lowerCamelCase_ = self.norm(A_ ) * (1 + scale) + shift
return x
class AdaLayerNormZero(nn.Module):
'''simple docstring'''
def __init__( self : Dict , A_ : List[str] , A_ : int ) -> str:
"""simple docstring"""
super().__init__()
lowerCamelCase_ = CombinedTimestepLabelEmbeddings(A_ , A_ )
lowerCamelCase_ = nn.SiLU()
lowerCamelCase_ = nn.Linear(A_ , 6 * embedding_dim , bias=A_ )
lowerCamelCase_ = nn.LayerNorm(A_ , elementwise_affine=A_ , eps=1E-6 )
def a__ ( self : Optional[Any] , A_ : int , A_ : Optional[int] , A_ : Optional[Any] , A_ : Any=None ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.linear(self.silu(self.emb(A_ , A_ , hidden_dtype=A_ ) ) )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = emb.chunk(6 , dim=1 )
lowerCamelCase_ = self.norm(A_ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
'''simple docstring'''
def __init__( self : Union[str, Any] , A_ : int , A_ : int , A_ : int , A_ : Optional[str] = None , A_ : float = 1E-5 ) -> Optional[int]:
"""simple docstring"""
super().__init__()
lowerCamelCase_ = num_groups
lowerCamelCase_ = eps
if act_fn is None:
lowerCamelCase_ = None
else:
lowerCamelCase_ = get_activation(A_ )
lowerCamelCase_ = nn.Linear(A_ , out_dim * 2 )
def a__ ( self : Dict , A_ : List[Any] , A_ : Dict ) -> Dict:
"""simple docstring"""
if self.act:
lowerCamelCase_ = self.act(A_ )
lowerCamelCase_ = self.linear(A_ )
lowerCamelCase_ = emb[:, :, None, None]
lowerCamelCase_ , lowerCamelCase_ = emb.chunk(2 , dim=1 )
lowerCamelCase_ = F.group_norm(A_ , self.num_groups , eps=self.eps )
lowerCamelCase_ = x * (1 + scale) + shift
return x
| 208 |
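A minimal usage sketch for the transformer block above, assuming the upstream diffusers signature that this copy mirrors (dim, num_attention_heads, attention_head_dim, then optional dropout/cross-attention arguments):

import torch

block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40)
hidden_states = torch.randn(2, 77, 320)  # (batch, sequence, dim)
out = block(hidden_states)               # same shape: (2, 77, 320)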
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    The output of [`PriorTransformer`].
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
'''simple docstring'''
@register_to_config
def __init__( self : Tuple , A_ : int = 32 , A_ : int = 64 , A_ : int = 20 , A_ : int = 768 , A_ : Optional[Any]=77 , A_ : Optional[int]=4 , A_ : float = 0.0 , A_ : str = "silu" , A_ : Optional[str] = None , A_ : Optional[str] = None , A_ : Optional[str] = "linear" , A_ : Optional[str] = "prd" , A_ : Optional[int] = None , A_ : Optional[int] = None , A_ : Optional[int] = None , ) -> List[Any]:
"""simple docstring"""
super().__init__()
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = attention_head_dim
lowerCamelCase_ = num_attention_heads * attention_head_dim
lowerCamelCase_ = additional_embeddings
lowerCamelCase_ = time_embed_dim or inner_dim
lowerCamelCase_ = embedding_proj_dim or embedding_dim
lowerCamelCase_ = clip_embed_dim or embedding_dim
lowerCamelCase_ = Timesteps(A_ , A_ , 0 )
lowerCamelCase_ = TimestepEmbedding(A_ , A_ , out_dim=A_ , act_fn=A_ )
lowerCamelCase_ = nn.Linear(A_ , A_ )
if embedding_proj_norm_type is None:
lowerCamelCase_ = None
elif embedding_proj_norm_type == "layer":
lowerCamelCase_ = nn.LayerNorm(A_ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
lowerCamelCase_ = nn.Linear(A_ , A_ )
if encoder_hid_proj_type is None:
lowerCamelCase_ = None
elif encoder_hid_proj_type == "linear":
lowerCamelCase_ = nn.Linear(A_ , A_ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
lowerCamelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , A_ ) )
if added_emb_type == "prd":
lowerCamelCase_ = nn.Parameter(torch.zeros(1 , 1 , A_ ) )
elif added_emb_type is None:
lowerCamelCase_ = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
lowerCamelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
A_ , A_ , A_ , dropout=A_ , activation_fn='gelu' , attention_bias=A_ , )
for d in range(A_ )
] )
if norm_in_type == "layer":
lowerCamelCase_ = nn.LayerNorm(A_ )
elif norm_in_type is None:
lowerCamelCase_ = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
lowerCamelCase_ = nn.LayerNorm(A_ )
lowerCamelCase_ = nn.Linear(A_ , A_ )
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , A_ , persistent=A_ )
lowerCamelCase_ = nn.Parameter(torch.zeros(1 , A_ ) )
lowerCamelCase_ = nn.Parameter(torch.zeros(1 , A_ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self : str ) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
lowerCamelCase_ = {}
def fn_recursive_add_processors(A_ : str , A_ : torch.nn.Module , A_ : Dict[str, AttentionProcessor] ):
if hasattr(A_ , 'set_processor' ):
lowerCamelCase_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , A_ , A_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(A_ , A_ , A_ )
return processors
def a__ ( self : List[Any] , A_ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = len(self.attn_processors.keys() )
if isinstance(A_ , A_ ) and len(A_ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(A_ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(A_ : str , A_ : torch.nn.Module , A_ : Union[str, Any] ):
if hasattr(A_ , 'set_processor' ):
if not isinstance(A_ , A_ ):
module.set_processor(A_ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , A_ , A_ )
for name, module in self.named_children():
fn_recursive_attn_processor(A_ , A_ , A_ )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
def a__ ( self : Dict , A_ : List[Any] , A_ : Union[torch.Tensor, float, int] , A_ : torch.FloatTensor , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[torch.BoolTensor] = None , A_ : bool = True , ) -> str:
"""simple docstring"""
lowerCamelCase_ = hidden_states.shape[0]
lowerCamelCase_ = timestep
if not torch.is_tensor(A_ ):
lowerCamelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
lowerCamelCase_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCamelCase_ = timesteps * torch.ones(A_ , dtype=timesteps.dtype , device=timesteps.device )
lowerCamelCase_ = self.time_proj(A_ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
lowerCamelCase_ = timesteps_projected.to(dtype=self.dtype )
lowerCamelCase_ = self.time_embedding(A_ )
if self.embedding_proj_norm is not None:
lowerCamelCase_ = self.embedding_proj_norm(A_ )
lowerCamelCase_ = self.embedding_proj(A_ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
lowerCamelCase_ = self.encoder_hidden_states_proj(A_ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
lowerCamelCase_ = self.proj_in(A_ )
lowerCamelCase_ = self.positional_embedding.to(hidden_states.dtype )
lowerCamelCase_ = []
lowerCamelCase_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(A_ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
lowerCamelCase_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
lowerCamelCase_ = hidden_states[:, None, :]
lowerCamelCase_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
lowerCamelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(A_ , -1 , -1 )
additional_embeds.append(A_ )
lowerCamelCase_ = torch.cat(
A_ , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
lowerCamelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
lowerCamelCase_ = F.pad(
A_ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
lowerCamelCase_ = hidden_states + positional_embeddings
if attention_mask is not None:
lowerCamelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
lowerCamelCase_ = F.pad(A_ , (0, self.additional_embeddings) , value=0.0 )
lowerCamelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
lowerCamelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
lowerCamelCase_ = self.norm_in(A_ )
for block in self.transformer_blocks:
lowerCamelCase_ = block(A_ , attention_mask=A_ )
lowerCamelCase_ = self.norm_out(A_ )
if self.prd_embedding is not None:
lowerCamelCase_ = hidden_states[:, -1]
else:
lowerCamelCase_ = hidden_states[:, additional_embeddings_len:]
lowerCamelCase_ = self.proj_to_clip_embeddings(A_ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=A_ )
def a__ ( self : Tuple , A_ : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 208 | 1 |
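The latent post-processing helper at the end of the prior undoes the CLIP-statistics normalization the model was trained with. Conceptually (the `prior` and `prior_latents` names below are assumptions for illustration):

# the prior predicts in a whitened space: (clip_embed - clip_mean) / clip_std
image_embeds = prior_latents * prior.clip_std + prior.clip_mean  # back to raw CLIP embedding space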
"""simple docstring"""
def is_even(number: int) -> bool:
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 |
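The bitmask trick above works because the lowest binary digit alone decides parity (note that `&` binds tighter than `==`, so the expression reads `(number & 1) == 0`):

assert is_even(6)      # 6 = 0b110, lowest bit is 0
assert not is_even(7)  # 7 = 0b111, lowest bit is 1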
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece Unigram tokenizer built on the `tokenizers` library."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
| 78 | 1 |
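A hedged usage sketch for the tokenizer class above; the learned vocabulary depends entirely on the training corpus, and very small corpora may need a much smaller `vocab_size` than the 8000 default:

tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(["this is a sentence", "this is another sentence"], vocab_size=60)
print(tokenizer.encode("this is a sentence").tokens)  # ends with the </s> token added by the post-processor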
'''simple docstring'''
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
__lowercase: Any = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
__lowercase: int = parser.parse_args()
main(args.correct_filename, args.fail_filename) | 31 |
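The `--correct_filename` input is expected to contain one semicolon-separated record per test, of the form `file;class;test;correct_line`. A hypothetical record, for illustration only:

tests/models/bert/test_modeling_bert.py;BertModelIntegrationTest;test_inference_no_head;expected_slice = torch.tensor([[0.1, 0.2, 0.3]])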
'''simple docstring'''
def optimal_merge_pattern(files: list) -> float:
    """Returns the minimum total cost to merge all the given files into one."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 31 | 1 |
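Worked example for the greedy merge above: with file sizes [2, 3, 4], the cheapest first merge is 2 + 3 (cost 5), then 5 + 4 (cost 9), for a total cost of 14:

assert optimal_merge_pattern([2, 3, 4]) == 14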
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__UpperCAmelCase = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    """General feature extraction class for speech recognition models."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ) and isinstance(processed_features[0], (dict, BatchFeature) ):
UpperCamelCase : Optional[Any] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
UpperCamelCase : List[Any] = processed_features[self.model_input_names[0]]
UpperCamelCase : Optional[int] = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(SCREAMING_SNAKE_CASE_ ) == 0:
if return_attention_mask:
UpperCamelCase : int = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
UpperCamelCase : Dict = required_input[0]
if isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
UpperCamelCase : str = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = 'tf'
elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = 'pt'
elif isinstance(SCREAMING_SNAKE_CASE_, (int, float, list, tuple, np.ndarray) ):
UpperCamelCase : Tuple = 'np'
else:
raise ValueError(
F"""type of {first_element} unknown: {type(SCREAMING_SNAKE_CASE_ )}. """
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0], (int, float) ):
UpperCamelCase : Tuple = to_numpy(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : Optional[Any] = [to_numpy(SCREAMING_SNAKE_CASE_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
UpperCamelCase : Optional[Any] = self._get_padding_strategies(padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = processed_features[self.model_input_names[0]]
UpperCamelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
if not all(len(SCREAMING_SNAKE_CASE_ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
UpperCamelCase : Any = []
for i in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = {k: v[i] for k, v in processed_features.items()}
# truncation
UpperCamelCase : str = self._truncate(
SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, pad_to_multiple_of=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, )
truncated_inputs.append(SCREAMING_SNAKE_CASE_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
UpperCamelCase : Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
UpperCamelCase : int = PaddingStrategy.MAX_LENGTH
UpperCamelCase : Dict = {}
for i in range(SCREAMING_SNAKE_CASE_ ):
# padding
UpperCamelCase : int = self._pad(
truncated_inputs[i], max_length=SCREAMING_SNAKE_CASE_, padding_strategy=SCREAMING_SNAKE_CASE_, pad_to_multiple_of=SCREAMING_SNAKE_CASE_, return_attention_mask=SCREAMING_SNAKE_CASE_, )
for key, value in outputs.items():
if key not in batch_outputs:
UpperCamelCase : Union[str, Any] = []
if value.dtype is np.dtype(np.floataa ):
UpperCamelCase : List[str] = value.astype(np.floataa )
batch_outputs[key].append(SCREAMING_SNAKE_CASE_ )
return BatchFeature(SCREAMING_SNAKE_CASE_, tensor_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = PaddingStrategy.DO_NOT_PAD, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, ) -> dict:
UpperCamelCase : Any = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
UpperCamelCase : Dict = len(SCREAMING_SNAKE_CASE_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCamelCase : Dict = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCamelCase : Tuple = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(SCREAMING_SNAKE_CASE_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
UpperCamelCase : List[Any] = np.ones(len(SCREAMING_SNAKE_CASE_ ), dtype=np.intaa )
if needs_to_be_padded:
UpperCamelCase : Union[str, Any] = max_length - len(SCREAMING_SNAKE_CASE_ )
if self.padding_side == "right":
if return_attention_mask:
UpperCamelCase : Any = np.pad(
processed_features['attention_mask'], (0, difference) )
UpperCamelCase : Tuple = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
UpperCamelCase : Optional[Any] = np.pad(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 'constant', constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
UpperCamelCase : List[str] = np.pad(
processed_features['attention_mask'], (difference, 0) )
UpperCamelCase : Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
UpperCamelCase : Tuple = np.pad(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 'constant', constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, ) -> Optional[Any]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
UpperCamelCase : Tuple = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCamelCase : str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCamelCase : str = len(SCREAMING_SNAKE_CASE_ ) > max_length
if needs_to_be_truncated:
UpperCamelCase : Optional[int] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
UpperCamelCase : Union[str, Any] = processed_features['attention_mask'][:max_length]
return processed_features
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=None ) -> Dict:
# Get padding strategy
if padding is not False:
if padding is True:
UpperCamelCase : int = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[Any] = PaddingStrategy(SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = padding
else:
UpperCamelCase : List[str] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
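# Usage sketch (added, not part of the original file): the mixin above is
# transformers' SequenceFeatureExtractor; a minimal concrete subclass only
# needs `model_input_names` plus the `feature_size`/`sampling_rate`/
# `padding_value` passed to __init__. The subclass name here is hypothetical.
import numpy as np
from transformers.feature_extraction_sequence_utils import SequenceFeatureExtractor


class ToyExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values"]


toy = ToyExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
batch = {"input_values": [np.array([0.1, 0.2, 0.3]), np.array([0.4])]}
padded = toy.pad(batch, padding="longest", return_attention_mask=True, return_tensors="np")
assert padded["input_values"].shape == (2, 3)  # shorter example right-padded with 0.0
assert padded["attention_mask"].tolist() == [[1, 1, 1], [1, 0, 0]]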
| 119 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def UpperCamelCase ( snake_case__ : Tuple ) -> List[str]:
UpperCamelCase : Optional[Any] = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class lowerCAmelCase_ ( a__ , a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : str = StableDiffusionLatentUpscalePipeline
UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
UpperCAmelCase__ : str = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
UpperCAmelCase__ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase__ : List[str] = frozenset(
        [] )  # TODO: update image_params once the pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase__ : Union[str, Any] = frozenset([] )
UpperCAmelCase__ : Optional[int] = True
@property
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : List[Any] = 1
UpperCamelCase : List[str] = 4
UpperCamelCase : List[str] = (16, 16)
UpperCamelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
return image
def snake_case_ ( self ) -> int:
torch.manual_seed(0 )
UpperCamelCase : int = UNetaDConditionModel(
act_fn='gelu', attention_head_dim=8, norm_num_groups=SCREAMING_SNAKE_CASE_, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
), in_channels=8, mid_block_type=SCREAMING_SNAKE_CASE_, only_cross_attention=SCREAMING_SNAKE_CASE_, out_channels=5, resnet_time_scale_shift='scale_shift', time_embedding_type='fourier', timestep_post_act='gelu', up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D'), )
UpperCamelCase : Tuple = AutoencoderKL(
block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
UpperCamelCase : Tuple = EulerDiscreteScheduler(prediction_type='sample' )
UpperCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='quick_gelu', projection_dim=512, )
UpperCamelCase : Any = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase : Optional[int] = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> List[str]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
UpperCamelCase : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : str = 'cpu'
UpperCamelCase : Tuple = self.get_dummy_components()
UpperCamelCase : Dict = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = pipe(**SCREAMING_SNAKE_CASE_ ).images
UpperCamelCase : str = image[0, -3:, -3:, -1]
self.assertEqual(image.shape, (1, 256, 256, 3) )
UpperCamelCase : List[Any] = np.array(
[0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
UpperCamelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE_, 1e-3 )
def snake_case_ ( self ) -> List[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def snake_case_ ( self ) -> Tuple:
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def snake_case_ ( self ) -> Dict:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def snake_case_ ( self ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def snake_case_ ( self ) -> List[str]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def snake_case_ ( self ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3e-3 )
def snake_case_ ( self ) -> int:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[Any] = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
UpperCamelCase : Tuple = self.get_dummy_components()
UpperCamelCase : Tuple = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = 2
UpperCamelCase : Optional[Any] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # skip schedulers that this pipeline does not support
                # (they have no sigma schedule)
continue
UpperCamelCase : List[Any] = getattr(SCREAMING_SNAKE_CASE_, scheduler_enum.name )
UpperCamelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
UpperCamelCase : List[str] = pipe(**SCREAMING_SNAKE_CASE_ )[0]
outputs.append(SCREAMING_SNAKE_CASE_ )
assert check_same_shape(SCREAMING_SNAKE_CASE_ )
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Dict = torch.manual_seed(33 )
UpperCamelCase : Union[str, Any] = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', torch_dtype=torch.floataa )
pipe.to('cuda' )
UpperCamelCase : Tuple = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.floataa )
upscaler.to('cuda' )
UpperCamelCase : Union[str, Any] = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
UpperCamelCase : int = pipe(SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, output_type='latent' ).images
UpperCamelCase : List[str] = upscaler(
prompt=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=20, guidance_scale=0, generator=SCREAMING_SNAKE_CASE_, output_type='np', ).images[0]
UpperCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def snake_case_ ( self ) -> int:
UpperCamelCase : List[Any] = torch.manual_seed(33 )
UpperCamelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.floataa )
upscaler.to('cuda' )
UpperCamelCase : Dict = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
UpperCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
UpperCamelCase : str = upscaler(
prompt=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=20, guidance_scale=0, generator=SCREAMING_SNAKE_CASE_, output_type='np', ).images[0]
UpperCamelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
assert np.abs((expected_image - image).max() ) < 5e-2
| 119 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( __lowerCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Dict = GPTSanJapaneseTokenizer
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : int = {"""do_clean_text""": False, """add_prefix_space""": False}
def _lowercase ( self ) -> Union[str, Any]:
super().setUp()
# fmt: off
_snake_case = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
# fmt: on
_snake_case = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
_snake_case = {"unk_token": "<unk>"}
_snake_case = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
_snake_case = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file ,"w" ) as emoji_writer:
emoji_writer.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
def _lowercase ( self ,**_SCREAMING_SNAKE_CASE ) -> Tuple:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
_snake_case = "こんにちは、世界。 \nこんばんは、㔺界。😀"
_snake_case = "こんにちは、世界。 \nこんばんは、世界。😀"
return input_text, output_text
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
_snake_case , _snake_case = self.get_input_output_texts(_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer.encode(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer.decode(_SCREAMING_SNAKE_CASE ,clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
return text, ids
def _lowercase ( self ) -> Union[str, Any]:
pass # TODO add if relevant
def _lowercase ( self ) -> str:
pass # TODO add if relevant
def _lowercase ( self ) -> Optional[Any]:
pass # TODO add if relevant
def _lowercase ( self ) -> str:
_snake_case = self.get_tokenizer()
# Testing tokenization
_snake_case = "こんにちは、世界。 こんばんは、㔺界。"
_snake_case = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
_snake_case = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
_snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_snake_case = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
_snake_case = tokens + [tokenizer.unk_token]
_snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
_snake_case = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> Dict:
_snake_case = self.get_tokenizer()
# Testing tokenization
_snake_case = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
_snake_case = "こんにちは、、、、世界。こんばんは、、、、世界。"
_snake_case = tokenizer.encode(_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer.decode(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ) -> List[Any]:
_snake_case = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_snake_case = "こんにちは、世界。"
_snake_case = "こんばんは、㔺界。😀"
_snake_case = "こんにちは、世界。こんばんは、世界。😀"
_snake_case = tokenizer.encode(prefix_text + input_text )
_snake_case = tokenizer.encode("" ,prefix_text=prefix_text + input_text )
_snake_case = tokenizer.encode(_SCREAMING_SNAKE_CASE ,prefix_text=_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer.decode(_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer.decode(_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer.decode(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ) -> Tuple:
_snake_case = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_snake_case = "こんにちは、世界。"
_snake_case = "こんばんは、㔺界。😀"
_snake_case = len(tokenizer.encode(_SCREAMING_SNAKE_CASE ) ) - 2
_snake_case = len(tokenizer.encode(_SCREAMING_SNAKE_CASE ) ) - 2
_snake_case = [1] + [0] * (len_prefix + len_text + 1)
_snake_case = [1] * (len_prefix + len_text + 1) + [0]
_snake_case = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_snake_case = tokenizer(prefix_text + input_text ).token_type_ids
_snake_case = tokenizer("" ,prefix_text=prefix_text + input_text ).token_type_ids
_snake_case = tokenizer(_SCREAMING_SNAKE_CASE ,prefix_text=_SCREAMING_SNAKE_CASE ).token_type_ids
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ) -> List[str]:
_snake_case = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_snake_case = tokenizer.encode("あンいワ" )
_snake_case = tokenizer.encode("" ,prefix_text="あンいワ" )
_snake_case = tokenizer.encode("いワ" ,prefix_text="あン" )
self.assertEqual(tokenizer.decode(_SCREAMING_SNAKE_CASE ) ,tokenizer.decode(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(tokenizer.decode(_SCREAMING_SNAKE_CASE ) ,tokenizer.decode(_SCREAMING_SNAKE_CASE ) )
self.assertNotEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertNotEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(x_token_a[1] ,x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] ,x_token_a[3] ) # SEG token
@slow
def _lowercase ( self ) -> List[Any]:
_snake_case = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_snake_case = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
_snake_case = tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer.batch_encode_plus(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE )
# fmt: off
_snake_case = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
_snake_case = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_snake_case = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token.token_type_ids ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token.attention_mask ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token_a.input_ids ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token_a.token_type_ids ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token_a.attention_mask ,_SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> List[Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def _lowercase ( self ) -> Optional[int]:
# tokenizer has no padding token
pass
| 142 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args: argparse.Namespace ) -> None:
"""simple docstring"""
_snake_case = os.path.join(args.tf_model_dir , "parameters.json" )
_snake_case = json.loads(open(_UpperCamelCase ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
_snake_case = args.output + ".pt"
_snake_case = OrderedDict()
with tf.device("/CPU:0" ):
_snake_case = tf.train.load_checkpoint(args.tf_model_dir )
_snake_case = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_snake_case = reader.get_tensor(_UpperCamelCase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_snake_case = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_snake_case = 8
_snake_case = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/moe" ):
_snake_case = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/softmlp/kernel" ):
_snake_case = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_snake_case = key_name[-9:-7]
for i in range(16 ):
_snake_case = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_snake_case = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/mlp" ):
_snake_case = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/p1/bias" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/p2/kernel" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/p2/bias" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/ln" ):
_snake_case = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_snake_case = "model.blocks.%d.feed_forward.norm.bias" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/g" ):
_snake_case = "model.blocks.%d.feed_forward.norm.weight" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/att" ):
_snake_case = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_snake_case = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_snake_case = state[:, 0, :, :]
_snake_case = state[:, 1, :, :]
_snake_case = state[:, 2, :, :]
_snake_case = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_snake_case = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_snake_case = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_snake_case = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_snake_case = torch.tensor(_UpperCamelCase )
_snake_case = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_snake_case = torch.tensor(_UpperCamelCase )
_snake_case = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/o/kernel" ):
_snake_case = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_snake_case = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/an" ):
_snake_case = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_snake_case = "model.blocks.%d.self_attn.norm.bias" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/g" ):
_snake_case = "model.blocks.%d.self_attn.norm.weight" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_snake_case = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_snake_case = "model.%s.weight" % nlayer
_snake_case = vnp.copy() # same in embedded
_snake_case = torch.tensor(_UpperCamelCase )
if key_name.startswith("model/wte" ):
_snake_case = "lm_head.weight"
_snake_case = vnp.copy() # same in embedded
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/wob" ):
_snake_case = "final_logits_bias"
_snake_case = vnp.copy() # same in embedded
_snake_case = state.reshape((1, -1) )
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name == "model/dense/kernel":
_snake_case = "model.last_project.weight"
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name == "model/dense_1/bias":
_snake_case = "model.last_project.bias"
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
torch.save(_UpperCamelCase , args.output )
if __name__ == "__main__":
UpperCamelCase_ : Tuple = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
UpperCamelCase_ : Any = parser.parse_args()
convert_tf_gptsan_to_pt(args)
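# Shape-level illustration (added; all sizes are made up) of the fused q/k/v
# kernel split performed in the "model/att" branch above: the TF kernel is
# (hidden, 3, heads, head_dim) and each slice becomes an (out, in) Linear weight.
hidden, heads, head_dim = 256, 8, 64
fused = np.zeros((hidden, 3, heads, head_dim), dtype=np.float32)
state_q = fused[:, 0, :, :]
weight_q = state_q.reshape([hidden, heads * head_dim]).transpose([1, 0]).copy()
assert weight_q.shape == (heads * head_dim, hidden)  # PyTorch Linear layout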
| 142 | 1 |
"""simple docstring"""
# using dfs for finding eulerian path traversal
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
a__: Optional[int] = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
a__ , a__: List[Any] = True, True
a__: Optional[Any] = dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return path
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: Optional[int] = 0
a__: List[str] = -1
for i in range(_SCREAMING_SNAKE_CASE ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
a__: Union[str, Any] = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: Optional[int] = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
a__ , a__: Union[str, Any] = check_circuit_or_path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if check == 3:
print('graph is not Eulerian' )
print('no path' )
return
a__: List[Any] = 1
if check == 2:
a__: Tuple = odd_node
print('graph has a Euler path' )
if check == 1:
print('graph has a Euler cycle' )
a__: Union[str, Any] = dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(_SCREAMING_SNAKE_CASE )
def __a ( ) ->List[Any]:
a__: Optional[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
a__: int = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
a__: List[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
a__: Optional[int] = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
a__: Optional[int] = {
1: [],
2: []
# all degree is zero
}
a__: Optional[Any] = 10
check_euler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_euler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_euler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_euler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_euler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
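# Quick sanity check (added, not in the original file): a triangle has all
# even degrees, so it is classified as having an Euler circuit (code 1).
triangle = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
assert check_circuit_or_path(triangle, 10) == (1, -1)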
| 290 | """simple docstring"""
class Node:
    def __init__(self, data: int, previous=None, next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head) -> None:
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None


def create_linked_list() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 290 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Optional[Any] = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['MobileNetV2FeatureExtractor']
_lowercase : str = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
_a = ['torch', 'torchsde']
def __init__( self : Union[str, Any], *lowerCamelCase : str, **lowerCamelCase : int )-> Tuple:
requires_backends(self, ['''torch''', '''torchsde'''] )
@classmethod
def snake_case ( cls : List[str], *lowerCamelCase : Optional[Any], **lowerCamelCase : Dict )-> str:
requires_backends(cls, ['''torch''', '''torchsde'''] )
@classmethod
def snake_case ( cls : Tuple, *lowerCamelCase : Dict, **lowerCamelCase : Tuple )-> List[str]:
requires_backends(cls, ['''torch''', '''torchsde'''] )
| 272 | 0 |
'''simple docstring'''

from math import ceil


def solution(n: int = 1_001) -> int:
    '''simple docstring'''
    # Sum of the numbers on both diagonals of an n x n number spiral
    # (Project Euler problem 28): ring i contributes 4 * (2i+1)**2 - 6 * (2i).
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number") | 174 |
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """simple docstring"""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """simple docstring"""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[largest], array[index] = array[index], array[largest]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    """simple docstring"""
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """simple docstring"""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    """simple docstring"""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """simple docstring"""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """simple docstring"""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted)) | 15 | 0 |
def sum_digits(num: int) -> int:
    '''simple docstring'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    '''simple docstring'''
    # Project Euler problem 65: digit sum of the numerator of the max_n-th
    # convergent of the continued fraction for e = [2; 1, 2, 1, 1, 4, 1, ...].
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(F"{solution() = }") | 352 |
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    '''simple docstring'''
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative (in place)
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows() | 295 | 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
_lowerCamelCase ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_lowerCamelCase =direct_transformers_import(PATH_TO_TRANSFORMERS)
_lowerCamelCase =transformers.models.auto.configuration_auto.CONFIG_MAPPING
_lowerCamelCase ={
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Any = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F'''config.{attribute}''' in modeling_source
or F'''getattr(config, "{attribute}"''' in modeling_source
or F'''getattr(self.config, "{attribute}"''' in modeling_source
):
lowerCamelCase : Optional[int] = True
# Deal with multi-line cases
elif (
re.search(
RF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''', lowerCamelCase, )
is not None
):
lowerCamelCase : str = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
lowerCamelCase : Union[str, Any] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
lowerCamelCase : Tuple = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
lowerCamelCase : Optional[int] = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
lowerCamelCase : Any = True
if not attribute_used:
lowerCamelCase : List[str] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
lowerCamelCase : List[str] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
lowerCamelCase : Union[str, Any] = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
lowerCamelCase : List[Any] = True
elif attribute.endswith("""_token_id""" ):
lowerCamelCase : str = True
# configuration class specific cases
if not case_allowed:
lowerCamelCase : Union[str, Any] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [] )
lowerCamelCase : List[Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def _a ( lowerCamelCase ):
lowerCamelCase : Optional[Any] = dict(inspect.signature(config_class.__init__ ).parameters )
lowerCamelCase : Any = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
lowerCamelCase : List[Any] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
lowerCamelCase : Optional[int] = {}
if len(config_class.attribute_map ) > 0:
lowerCamelCase : Union[str, Any] = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
lowerCamelCase : Optional[Any] = inspect.getsourcefile(lowerCamelCase )
lowerCamelCase : List[Any] = os.path.dirname(lowerCamelCase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
lowerCamelCase : Optional[Any] = [os.path.join(lowerCamelCase, lowerCamelCase ) for fn in os.listdir(lowerCamelCase ) if fn.startswith("""modeling_""" )]
# Get the source code strings
lowerCamelCase : int = []
for path in modeling_paths:
if os.path.isfile(lowerCamelCase ):
with open(lowerCamelCase ) as fp:
modeling_sources.append(fp.read() )
lowerCamelCase : Union[str, Any] = []
for config_param, default_value in zip(lowerCamelCase, lowerCamelCase ):
# `attributes` here is all the variant names for `config_param`
lowerCamelCase : Optional[int] = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
unused_attributes.append(attributes[0] )
return sorted(lowerCamelCase )
def _a ( ):
lowerCamelCase : Tuple = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
lowerCamelCase : Optional[int] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ), lambda lowerCamelCase : inspect.isclass(lowerCamelCase )
and issubclass(lowerCamelCase, lowerCamelCase )
and inspect.getmodule(lowerCamelCase ) == inspect.getmodule(_config_class ), )
]
for config_class in config_classes_in_module:
lowerCamelCase : str = check_config_attributes_being_used(lowerCamelCase )
if len(lowerCamelCase ) > 0:
lowerCamelCase : Dict = unused_attributes
if len(lowerCamelCase ) > 0:
lowerCamelCase : Tuple = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += F'''{name}: {attributes}\n'''
raise ValueError(lowerCamelCase )
if __name__ == "__main__":
check_config_attributes()
| 287 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A__ ( PretrainedConfig):
_UpperCAmelCase : List[Any] = """gpt_neo"""
_UpperCAmelCase : Union[str, Any] = ["""past_key_values"""]
_UpperCAmelCase : List[Any] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , __magic_name__=5_0_2_5_7 , __magic_name__=2_0_4_8 , __magic_name__=2_0_4_8 , __magic_name__=2_4 , __magic_name__=[[["global", "local"], 1_2]] , __magic_name__=1_6 , __magic_name__=None , __magic_name__=2_5_6 , __magic_name__="gelu_new" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , **__magic_name__ , ):
lowerCamelCase : List[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : str = hidden_size
lowerCamelCase : Optional[int] = num_layers
lowerCamelCase : str = num_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : List[Any] = window_size
lowerCamelCase : int = activation_function
lowerCamelCase : Union[str, Any] = resid_dropout
lowerCamelCase : List[Any] = embed_dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Dict = classifier_dropout
lowerCamelCase : Any = layer_norm_epsilon
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Dict = use_cache
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : int = eos_token_id
lowerCamelCase : List[Any] = attention_types
lowerCamelCase : Optional[Any] = self.expand_attention_types_params(__magic_name__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ ):
lowerCamelCase : Optional[int] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def custom_unfold( input, dimension, size, step ):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step )
    min_length = torch.div(sizedim - size, step, rounding_mode="""floor""" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks( seq_length, window_size ):
    import torch
    candidates = torch.arange(1, window_size )
    remainders = torch.remainder(seq_length, candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="""floor""" )
class A__ ( OnnxConfigWithPast):
@property
    def inputs( self ):
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
@property
    def num_attention_heads( self ):
        return self._config.num_heads
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
                batch , seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset( self ):
        return 1_3
| 287 | 1 |
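A quick way to sanity-check the two ONNX helpers above is to compare `custom_unfold` against the built-in `torch.Tensor.unfold` it re-implements, and to spot-check the divisor search (a minimal sketch, assuming the helpers are defined under the names restored above):

import torch

x = torch.arange(24, dtype=torch.float32).reshape(2, 12)
# windows of size 4 along dim 1 with stride 2: built-in vs. ONNX-exportable version
expected = x.unfold(dimension=1, size=4, step=2)
actual = custom_unfold(x, dimension=1, size=4, step=2)
assert torch.equal(actual, expected)

# largest divisor of 12 strictly below window_size 8 is 6, giving 2 blocks
block_length, num_blocks = custom_get_block_length_and_num_blocks(torch.tensor(12), 8)
assert (block_length.item(), num_blocks.item()) == (6, 2)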
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ : int = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : str = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 170 |
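For context, `_LazyModule` defers the heavy framework imports above until an attribute is first touched. A minimal illustration of the same idea (the names here are illustrative, not the actual transformers implementation):

import importlib
from types import ModuleType

class LazyModule(ModuleType):
    """Defer importing submodules until one of their attributes is accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value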
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
_UpperCAmelCase : Dict = nn.Parameter(lowerCAmelCase_ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
_UpperCAmelCase : Optional[Any] = nn.Parameter(lowerCAmelCase_ )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
# set torch weights for 1-to-1 comparison
_UpperCAmelCase : List[str] = np.asarray(weights[0] )
_UpperCAmelCase : Union[str, Any] = np.asarray(weights[1] )
_UpperCAmelCase : Optional[Any] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCAmelCase_ ).view(-1 , lowerCAmelCase_ ).contiguous().transpose(0 , 1 ) , )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
# set torch weights for 1-to-1 comparison
_UpperCAmelCase : Optional[int] = np.asarray(weights[0] )
_UpperCAmelCase : Tuple = np.asarray(weights[1] )
_UpperCAmelCase : List[str] = np.asarray(weights[2] )
_UpperCAmelCase : str = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCAmelCase_ ).view(-1 , lowerCAmelCase_ ).contiguous().transpose(0 , 1 ) , )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
# layernorm 1
_UpperCAmelCase : Tuple = weights[0][0][0]
_UpperCAmelCase : Optional[int] = np.asarray(layer_norm_a[0] )
_UpperCAmelCase : List[str] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCAmelCase_ ) , torch.tensor(lowerCAmelCase_ ) , )
# lsh weights + output
_UpperCAmelCase : List[Any] = weights[0][1]
if len(lowerCAmelCase_ ) < 4:
set_layer_weights_in_torch_lsh(lowerCAmelCase_ , torch_block.attention , lowerCAmelCase_ )
else:
set_layer_weights_in_torch_local(lowerCAmelCase_ , torch_block.attention , lowerCAmelCase_ )
# intermediate weights
_UpperCAmelCase : int = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCAmelCase_ ) == 4:
_UpperCAmelCase : List[str] = intermediate_weights[2]
# layernorm 2
_UpperCAmelCase : str = np.asarray(intermediate_weights[0][0] )
_UpperCAmelCase : Dict = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCAmelCase_ ) , torch.tensor(lowerCAmelCase_ ) , )
# intermediate dense
_UpperCAmelCase : int = np.asarray(intermediate_weights[1][0] )
_UpperCAmelCase : List[Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCAmelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase_ ) , )
# intermediate out
_UpperCAmelCase : Tuple = np.asarray(intermediate_weights[4][0] )
_UpperCAmelCase : List[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCAmelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase_ ) , )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
# reformer model
_UpperCAmelCase : Union[str, Any] = torch_model.reformer
# word embeds
_UpperCAmelCase : Union[str, Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCAmelCase_ ) , )
if isinstance(weights[3] , lowerCAmelCase_ ):
_UpperCAmelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_UpperCAmelCase : Any = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"{position_embeddings[emb_idx]} emb does not match"
_UpperCAmelCase : Dict = nn.Parameter(torch.tensor(lowerCAmelCase_ ) )
_UpperCAmelCase : str = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCAmelCase_ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_UpperCAmelCase : Any = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# output layer norm
_UpperCAmelCase : str = np.asarray(weights[7][0] )
_UpperCAmelCase : Optional[int] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCAmelCase_ ) , torch.tensor(lowerCAmelCase_ ) , )
# output embeddings
_UpperCAmelCase : Tuple = np.asarray(weights[9][0] )
_UpperCAmelCase : Optional[Any] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowerCAmelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase_ ) , )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
# Initialise PyTorch model
_UpperCAmelCase : Optional[int] = ReformerConfig.from_json_file(lowerCAmelCase_ )
print(f"Building PyTorch model from configuration: {config}" )
_UpperCAmelCase : Any = ReformerModelWithLMHead(lowerCAmelCase_ )
with open(lowerCAmelCase_ , """rb""" ) as f:
_UpperCAmelCase : List[str] = pickle.load(lowerCAmelCase_ )["""weights"""]
set_model_weights_in_torch(lowerCAmelCase_ , lowerCAmelCase_ , config.hidden_size )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , lowerCAmelCase_ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the trax model pickle (.pkl) checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 170 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
__a = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__a = "cuda" if torch.cuda.is_available() else "cpu"
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=100 , _lowerCAmelCase=" " ) -> List[str]:
snake_case__ : Dict = text.split(snake_case_ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(snake_case_ ) , snake_case_ )]
def __snake_case( _lowerCAmelCase ) -> dict:
titles , texts = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(snake_case_ ):
titles.append(title if title is not None else """""" )
texts.append(snake_case_ )
return {"title": titles, "text": texts}
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> dict:
snake_case__ : Tuple = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=snake_case_ , padding="""longest""" , return_tensors="""pt""" )['''input_ids''']
snake_case__ : int = ctx_encoder(input_ids.to(device=snake_case_ ) , return_dict=snake_case_ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Tuple:
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
snake_case__ : Optional[Any] = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
snake_case__ : str = dataset.map(snake_case_ , batched=snake_case_ , num_proc=processing_args.num_proc )
# And compute the embeddings
snake_case__ : Union[str, Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=snake_case_ )
snake_case__ : str = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
snake_case__ : Optional[int] = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
snake_case__ : List[Any] = dataset.map(
partial(snake_case_ , ctx_encoder=snake_case_ , ctx_tokenizer=snake_case_ ) , batched=snake_case_ , batch_size=processing_args.batch_size , features=snake_case_ , )
# And finally save your dataset
snake_case__ : int = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(snake_case_ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
snake_case__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=snake_case_ )
# And save the index
snake_case__ : List[str] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(snake_case_ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowercase = field(
default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
lowercase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
lowercase = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
lowercase = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
lowercase = field(
default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowercase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
lowercase = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowercase = field(
default=7_68 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
lowercase = field(
default=1_28 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
rag_example_args , processing_args , index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
    rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
    main(rag_example_args, processing_args, index_hnsw_args)
| 35 |
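The `split_text` helper at the top of the script chunks each document into fixed-size word windows before embedding. A de-garbled, standalone restatement with a worked call:

def split_text(text: str, n: int = 100, character: str = " ") -> list[str]:
    """Split `text` into passages of at most `n` words."""
    words = text.split(character)
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]

print(split_text("one two three four five six seven", n=3))
# ['one two three', 'four five six', 'seven']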
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
class __snake_case :
"""simple docstring"""
    def __init__( self , text: str = None , conversation_id: uuid.UUID = None , past_user_inputs=None , generated_responses=None ):
        '''simple docstring'''
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text
def __eq__( self , __lowerCamelCase ):
'''simple docstring'''
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""" )
__A : str = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
__A : Union[str, Any] = text
def UpperCamelCase__( self ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__A : List[Any] = None
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
self.generated_responses.append(__lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
'''simple docstring'''
__A : Optional[Any] = F"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
__A : Tuple = '''user''' if is_user else '''bot'''
output += F"""{name} >> {text} \n"""
return output
@add_end_docstrings(
SCREAMING_SNAKE_CASE__ , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class __snake_case ( Pipeline ):
"""simple docstring"""
def __init__( self , *__lowerCamelCase , **__lowerCamelCase ):
'''simple docstring'''
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
if self.tokenizer.pad_token_id is None:
__A : Union[str, Any] = self.tokenizer.eos_token
def UpperCamelCase__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase ):
'''simple docstring'''
__A : str = {}
__A : List[str] = {}
__A : Any = {}
if min_length_for_response is not None:
__A : int = min_length_for_response
if minimum_tokens is not None:
__A : Any = minimum_tokens
if "max_length" in generate_kwargs:
__A : List[Any] = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__A : str = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__lowerCamelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self , __lowerCamelCase , __lowerCamelCase=0 , **__lowerCamelCase ):
'''simple docstring'''
__A : Any = super().__call__(__lowerCamelCase , num_workers=__lowerCamelCase , **__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) == 1:
return outputs[0]
return outputs
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=32 ):
'''simple docstring'''
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise ValueError('''ConversationalPipeline expects a Conversation as input''' )
if conversation.new_user_input is None:
raise ValueError(
F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
'''Add user inputs with the conversation\'s `add_user_input` method''' )
if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
__A : List[Any] = self.tokenizer._build_conversation_input_ids(__lowerCamelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__A : int = self._legacy_parse_and_tokenize(__lowerCamelCase )
if self.framework == "pt":
__A : Union[str, Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__A : int = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=10 , **__lowerCamelCase ):
'''simple docstring'''
__A : Tuple = generate_kwargs.get('''max_length''' , self.model.config.max_length )
__A : str = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
__A : str = max_length - minimum_tokens
__A : Any = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
__A : Union[str, Any] = model_inputs['''attention_mask'''][:, -trim:]
__A : Dict = model_inputs.pop('''conversation''' )
__A : List[str] = max_length
__A : Dict = self.model.generate(**__lowerCamelCase , **__lowerCamelCase )
if self.model.config.is_encoder_decoder:
__A : Any = 1
else:
__A : List[Any] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=True ):
'''simple docstring'''
__A : int = model_outputs['''output_ids''']
__A : Optional[int] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase , )
__A : Dict = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(__lowerCamelCase )
return conversation
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
eos_token_id = self.tokenizer.eos_token_id
input_ids = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
if len(__lowerCamelCase ) > self.tokenizer.model_max_length:
input_ids = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 179 | 0 |
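A minimal usage sketch of the pipeline defined above (the checkpoint name is illustrative; any conversational checkpoint works):

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("What's a good way to learn Python?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])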
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def a_ ( _UpperCAmelCase : Optional[int] ) -> List[str]:
__snake_case : Optional[int] = 3_84
__snake_case : str = 7
if "tiny" in model_name:
__snake_case : str = 96
__snake_case : Tuple = (2, 2, 6, 2)
__snake_case : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
__snake_case : List[Any] = 96
__snake_case : Union[str, Any] = (2, 2, 18, 2)
__snake_case : int = (3, 6, 12, 24)
elif "base" in model_name:
__snake_case : Union[str, Any] = 1_28
__snake_case : int = (2, 2, 18, 2)
__snake_case : Dict = (4, 8, 16, 32)
__snake_case : List[Any] = 12
__snake_case : Optional[int] = 5_12
elif "large" in model_name:
__snake_case : Optional[Any] = 1_92
__snake_case : Dict = (2, 2, 18, 2)
__snake_case : List[Any] = (6, 12, 24, 48)
__snake_case : Union[str, Any] = 12
__snake_case : List[Any] = 7_68
# set label information
__snake_case : Optional[int] = 1_50
__snake_case : str = 'huggingface/label-files'
__snake_case : List[Any] = 'ade20k-id2label.json'
__snake_case : Any = json.load(open(hf_hub_download(_UpperCAmelCase ,_UpperCAmelCase ,repo_type='dataset' ) ,'r' ) )
__snake_case : int = {int(k ): v for k, v in idalabel.items()}
__snake_case : Optional[int] = {v: k for k, v in idalabel.items()}
__snake_case : List[Any] = SwinConfig(
embed_dim=_UpperCAmelCase ,depths=_UpperCAmelCase ,num_heads=_UpperCAmelCase ,window_size=_UpperCAmelCase ,out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,)
__snake_case : str = UperNetConfig(
backbone_config=_UpperCAmelCase ,auxiliary_in_channels=_UpperCAmelCase ,num_labels=_UpperCAmelCase ,id2label=_UpperCAmelCase ,label2id=_UpperCAmelCase ,)
return config
def a_ ( _UpperCAmelCase : Optional[Any] ) -> List[Any]:
__snake_case : str = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def a_ ( _UpperCAmelCase : List[str] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : str ) -> Tuple:
__snake_case : List[Any] = dct.pop(_UpperCAmelCase )
__snake_case : str = val
def a_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : Optional[int] ) -> str:
__snake_case : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__snake_case : str = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__snake_case : Optional[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
__snake_case : Tuple = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__snake_case : Any = in_proj_weight[:dim, :]
__snake_case : int = in_proj_bias[: dim]
__snake_case : str = in_proj_weight[
dim : dim * 2, :
]
__snake_case : str = in_proj_bias[
dim : dim * 2
]
__snake_case : Optional[Any] = in_proj_weight[
-dim :, :
]
__snake_case : Any = in_proj_bias[-dim :]
# fmt: on
def a_ ( _UpperCAmelCase : Any ) -> Optional[Any]:
__snake_case , __snake_case : Union[str, Any] = x.shape
__snake_case : Any = x.reshape(_UpperCAmelCase ,4 ,in_channel // 4 )
__snake_case : str = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_UpperCAmelCase ,_UpperCAmelCase )
return x
def a_ ( _UpperCAmelCase : str ) -> List[Any]:
__snake_case , __snake_case : Union[str, Any] = x.shape
__snake_case : Tuple = x.reshape(_UpperCAmelCase ,in_channel // 4 ,4 )
__snake_case : Any = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_UpperCAmelCase ,_UpperCAmelCase )
return x
def a_ ( _UpperCAmelCase : List[str] ) -> List[str]:
__snake_case : int = x.shape[0]
__snake_case : Optional[Any] = x.reshape(4 ,in_channel // 4 )
__snake_case : int = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_UpperCAmelCase )
return x
def a_ ( _UpperCAmelCase : Optional[int] ) -> Any:
__snake_case : Tuple = x.shape[0]
__snake_case : Tuple = x.reshape(in_channel // 4 ,4 )
__snake_case : Dict = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_UpperCAmelCase )
return x
def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Dict ) -> str:
__snake_case : Optional[Any] = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
__snake_case : List[str] = model_name_to_url[model_name]
__snake_case : str = torch.hub.load_state_dict_from_url(_UpperCAmelCase ,map_location='cpu' ,file_name=_UpperCAmelCase )[
'state_dict'
]
for name, param in state_dict.items():
print(_UpperCAmelCase ,param.shape )
__snake_case : List[str] = get_upernet_config(_UpperCAmelCase )
__snake_case : List[Any] = UperNetForSemanticSegmentation(_UpperCAmelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__snake_case : int = state_dict.pop(_UpperCAmelCase )
if "bn" in key:
__snake_case : Tuple = key.replace('bn' ,'batch_norm' )
__snake_case : Union[str, Any] = val
# rename keys
__snake_case : Any = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase ,config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
__snake_case : str = reverse_correct_unfold_reduction_order(_UpperCAmelCase )
if "norm" in key:
__snake_case : List[str] = reverse_correct_unfold_norm_order(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
# verify on image
__snake_case : List[str] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
__snake_case : Union[str, Any] = Image.open(requests.get(_UpperCAmelCase ,stream=_UpperCAmelCase ).raw ).convert('RGB' )
__snake_case : Tuple = SegformerImageProcessor()
__snake_case : str = processor(_UpperCAmelCase ,return_tensors='pt' ).pixel_values
with torch.no_grad():
__snake_case : Any = model(_UpperCAmelCase )
__snake_case : Dict = outputs.logits
print(logits.shape )
print('First values of logits:' ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
__snake_case : int = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] )
elif model_name == "upernet-swin-small":
__snake_case : Any = torch.tensor(
[[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] )
elif model_name == "upernet-swin-base":
__snake_case : Dict = torch.tensor(
[[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] )
elif model_name == "upernet-swin-large":
__snake_case : List[str] = torch.tensor(
[[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] )
print('Logits:' ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_UpperCAmelCase ,atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_UpperCAmelCase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[F"""upernet-swin-{size}""" for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 |
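The `read_in_q_k_v` step above slices each fused `qkv` projection into equal thirds for query, key, and value. The slicing logic in isolation:

import torch

dim = 4
in_proj_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q = in_proj_weight[:dim, :]           # first `dim` rows  -> query
k = in_proj_weight[dim : dim * 2, :]  # middle `dim` rows -> key
v = in_proj_weight[-dim:, :]          # last `dim` rows   -> value
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)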
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Optional[int] = {}
class snake_case__ ( PretrainedConfig ):
A__ = '''llama'''
A__ = ['''past_key_values''']
def __init__( self : Any , __a : List[str]=32000 , __a : Union[str, Any]=4096 , __a : Optional[Any]=11008 , __a : Any=32 , __a : str=32 , __a : Optional[int]=None , __a : Dict="silu" , __a : Dict=2048 , __a : List[str]=0.0_2 , __a : Union[str, Any]=1e-6 , __a : Dict=True , __a : List[str]=0 , __a : Tuple=1 , __a : Tuple=2 , __a : Optional[Any]=1 , __a : Any=False , __a : Tuple=None , **__a : List[Any] , ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = vocab_size
__snake_case : List[str] = max_position_embeddings
__snake_case : List[Any] = hidden_size
__snake_case : Union[str, Any] = intermediate_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__snake_case : Optional[int] = num_attention_heads
__snake_case : Optional[Any] = num_key_value_heads
__snake_case : int = hidden_act
__snake_case : Any = initializer_range
__snake_case : Any = rms_norm_eps
__snake_case : Union[str, Any] = pretraining_tp
__snake_case : Optional[int] = use_cache
__snake_case : Any = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f'''got {self.rope_scaling}''' )
__snake_case : Optional[Any] = self.rope_scaling.get('type' , __a )
__snake_case : Tuple = self.rope_scaling.get('factor' , __a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
| 0 | 1 |
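The `_rope_scaling_validation` hook above only accepts a two-field dictionary. A short sketch of the accepted and rejected shapes (assuming the released `LlamaConfig`, which mirrors this validation):

from transformers import LlamaConfig

# a valid scaling spec passes validation
config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})

# a factor <= 1.0 trips the ValueError raised above
try:
    LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})
except ValueError as err:
    print(err)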
import random
class Onepad:
    @staticmethod
    def encrypt( text ) -> tuple[list[int], list[int]]:
        '''simple docstring'''
        plain = [ord(i ) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1 , 300 )
            c = (i + k) * k  # c = i*k + k**2, so (c - k**2) / k recovers i exactly
            cipher.append(c )
            key.append(k )
        return cipher, key

    @staticmethod
    def decrypt( cipher , key ) -> str:
        '''simple docstring'''
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
c , k = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
| 6 |
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel( k_size , sigma ):
    center = k_size // 2
    x , y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
    return g
def gaussian_filter( image , k_size , sigma ):
    height , width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size) )
    row = 0
    for i, j in product(range(dst_height ) , range(dst_width ) ):
        window = ravel(image[i : i + k_size, j : j + k_size] )
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size , sigma )
    filter_array = ravel(gaussian_kernel )
    # reshape and get the dst image
    dst = dot(image_array , filter_array ).reshape(dst_height , dst_width ).astype(uint8 )
    return dst
if __name__ == "__main__":
# read original image
    img = imread(r'../image_data/lena.jpg')
    # turn image into grayscale
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('gaussian filter with 3x3 mask', gaussian3x3)
    imshow('gaussian filter with 5x5 mask', gaussian5x5)
    waitKey()
| 44 | 0 |
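One caveat worth noting in the kernel above: it is scaled by 1/(2*pi*sigma) rather than the true 2-D Gaussian normalizer 1/(2*pi*sigma^2), and discrete kernels are usually normalized so their weights sum to one, otherwise the filtered image's brightness drifts. A normalized variant (a sketch, not part of the original script):

import numpy as np

def normalized_gaussian_kernel(k_size: int, sigma: float) -> np.ndarray:
    center = k_size // 2
    x, y = np.mgrid[-center : k_size - center, -center : k_size - center]
    g = np.exp(-(x**2 + y**2) / (2 * sigma**2))
    return g / g.sum()  # kernel weights now sum to exactly 1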
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class _a ( PretrainedConfig ):
_lowercase : Dict = '''longformer'''
    def __init__( self: str , attention_window: Union[List[int], int] = 512 , sep_token_id: int = 2 , pad_token_id: int = 1 , bos_token_id: int = 0 , eos_token_id: int = 2 , vocab_size: int = 30_522 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3_072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 512 , type_vocab_size: int = 2 , initializer_range: float = 0.02 , layer_norm_eps: float = 1E-1_2 , onnx_export: bool = False , **kwargs: Tuple , ) -> List[str]:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class _a ( OnnxConfig ):
    def __init__( self: Dict , config: "PretrainedConfig" , task: str = "default" , patching_specs: "List[PatchingSpec]" = None ) -> List[Any]:
        """simple docstring"""
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
@property
def lowerCamelCase_ ( self: int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def lowerCamelCase_ ( self: Dict ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
outputs = super().outputs
if self.task == "default":
outputs["""pooler_output"""] = {0: '''batch'''}
return outputs
@property
def lowerCamelCase_ ( self: Optional[int] ) -> float:
"""simple docstring"""
return 1E-4
@property
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
return max(super().default_onnx_opset , 14 )
    def generate_dummy_inputs( self: Optional[Any] , preprocessor: "PreTrainedTokenizerBase" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """simple docstring"""
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["""global_attention_mask"""] = torch.zeros_like(inputs["""input_ids"""] )
        # make every second token global
        inputs["""global_attention_mask"""][:, ::2] = 1
        return inputs
| 369 |
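The dummy-input generator above marks every second token as global only to exercise the export path; in real use the global mask is task-specific, for example global attention on the first token for classification (a usage sketch with the checkpoint names from the map above):

import torch
from transformers import LongformerModel, LongformerTokenizer

tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
model = LongformerModel.from_pretrained("allenai/longformer-base-4096")

inputs = tokenizer("Long documents go here.", return_tensors="pt")
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1  # give the <s> token global attention
outputs = model(**inputs, global_attention_mask=global_attention_mask)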
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
lowerCAmelCase = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
lowerCAmelCase = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
lowerCAmelCase = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[str] ) -> Union[str, Any]:
"""simple docstring"""
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Dict=0.9 , UpperCamelCase_: Union[str, Any]=3 , UpperCamelCase_: Optional[int]=0.5 ) -> Dict:
"""simple docstring"""
if NLTK_VERSION >= version.Version('''3.6.5''' ):
lowercase__ = [
meteor_score.single_meteor_score(
word_tokenize(UpperCamelCase_ ) , word_tokenize(UpperCamelCase_ ) , alpha=UpperCamelCase_ , beta=UpperCamelCase_ , gamma=UpperCamelCase_ )
for ref, pred in zip(UpperCamelCase_ , UpperCamelCase_ )
]
else:
lowercase__ = [
meteor_score.single_meteor_score(UpperCamelCase_ , UpperCamelCase_ , alpha=UpperCamelCase_ , beta=UpperCamelCase_ , gamma=UpperCamelCase_ )
for ref, pred in zip(UpperCamelCase_ , UpperCamelCase_ )
]
return {"meteor": np.mean(UpperCamelCase_ )}
| 93 | 0 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class A__ ( Pipeline ):
    def __init__( self , *args , **kwargs ) -> List[Any]:
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        requires_backends(self , """decord""" )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None , num_frames=None , frame_sampling_rate=None ) -> Tuple:
        '''simple docstring'''
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["""frame_sampling_rate"""] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["""num_frames"""] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , videos , **kwargs ) -> Any:
        '''simple docstring'''
        return super().__call__(videos , **kwargs )
    def preprocess( self , video , num_frames=None , frame_sampling_rate=1 ) -> List[Any]:
        '''simple docstring'''
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith("""http://""" ) or video.startswith("""https://""" ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ) -> Any:
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ) -> int:
        '''simple docstring'''
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 162 |
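The preprocess step spreads `num_frames` sample indices evenly across a window of `num_frames * frame_sampling_rate` frames. The index arithmetic in isolation:

import numpy as np

num_frames, frame_sampling_rate = 8, 4
start_idx = 0
end_idx = num_frames * frame_sampling_rate - 1  # 31
indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
print(indices)  # [ 0  4  8 13 17 22 26 31]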
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = """ylacombe/bark-small"""
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
__UpperCAmelCase : Optional[Any] = """en_speaker_1"""
__UpperCAmelCase : Union[str, Any] = """This is a test string"""
__UpperCAmelCase : Dict = """speaker_embeddings_path.json"""
__UpperCAmelCase : Any = """speaker_embeddings"""
def lowerCamelCase__ ( self : Dict , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCamelCase )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.get_tokenizer()
__UpperCAmelCase : Any = BarkProcessor(tokenizer=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__UpperCAmelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__UpperCAmelCase : Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
seq_len = 35
nb_codebooks_coarse = 2
nb_codebooks_total = 8
voice_preset = {
"""semantic_prompt""": np.ones(seq_len ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__UpperCAmelCase : Dict = processor(text=self.input_string , voice_preset=UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Optional[int] = processor(text=self.input_string , voice_preset=UpperCamelCase )
__UpperCAmelCase : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__UpperCAmelCase : Dict = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : str = self.get_tokenizer()
__UpperCAmelCase : Union[str, Any] = BarkProcessor(tokenizer=UpperCamelCase )
__UpperCAmelCase : List[str] = processor(text=self.input_string )
__UpperCAmelCase : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=UpperCamelCase , return_attention_mask=UpperCamelCase , return_token_type_ids=UpperCamelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 115 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __lowercase ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: Any = dataset
SCREAMING_SNAKE_CASE_: List[str] = process
SCREAMING_SNAKE_CASE_: int = params
def __len__( self : List[str]):
return len(self.dataset)
def __getitem__( self : Tuple , lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: Optional[Any] = self.dataset[i]
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.process(_SCREAMING_SNAKE_CASE , **self.params)
return processed
class __lowercase ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int]=None):
SCREAMING_SNAKE_CASE_: Any = loader
SCREAMING_SNAKE_CASE_: Tuple = infer
SCREAMING_SNAKE_CASE_: List[Any] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
SCREAMING_SNAKE_CASE_: str = None
SCREAMING_SNAKE_CASE_: List[str] = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE_: Dict = None
SCREAMING_SNAKE_CASE_: Any = None
def __len__( self : List[Any]):
return len(self.loader)
def __iter__( self : Any):
SCREAMING_SNAKE_CASE_: Tuple = iter(self.loader)
return self
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
if isinstance(self._loader_batch_data , torch.Tensor):
# Batch data is simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE_: Any = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE_: List[str] = {}
for k, element in self._loader_batch_data.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE_: List[str] = element.to_tuple()
if isinstance(element[0] , torch.Tensor):
SCREAMING_SNAKE_CASE_: Dict = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
SCREAMING_SNAKE_CASE_: Optional[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor):
SCREAMING_SNAKE_CASE_: Dict = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
SCREAMING_SNAKE_CASE_: List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if element is None:
# This can happen for optional data that get passed around
SCREAMING_SNAKE_CASE_: List[Any] = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE_: Tuple = element[self._loader_batch_index].unsqueeze(0)
elif isinstance(element[self._loader_batch_index] , np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE_: Any = np.expand_dims(element[self._loader_batch_index] , 0)
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE_: Any = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
SCREAMING_SNAKE_CASE_: List[str] = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE)
self._loader_batch_index += 1
return result
def _SCREAMING_SNAKE_CASE ( self : Dict):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE_: Dict = next(self.iterator)
SCREAMING_SNAKE_CASE_: List[str] = self.infer(_SCREAMING_SNAKE_CASE , **self.params)
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor):
SCREAMING_SNAKE_CASE_: Optional[Any] = processed
else:
SCREAMING_SNAKE_CASE_: Dict = list(processed.keys())[0]
SCREAMING_SNAKE_CASE_: int = processed[key]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
SCREAMING_SNAKE_CASE_: Tuple = len(_SCREAMING_SNAKE_CASE)
else:
SCREAMING_SNAKE_CASE_: int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE_: Any = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE_: Tuple = processed
SCREAMING_SNAKE_CASE_: List[Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class __lowercase ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=None):
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def __iter__( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Any = iter(self.loader)
SCREAMING_SNAKE_CASE_: str = None
return self
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
if self.subiterator is None:
SCREAMING_SNAKE_CASE_: List[Any] = self.infer(next(self.iterator) , **self.params)
try:
# Try to return next item
SCREAMING_SNAKE_CASE_: Any = next(self.subiterator)
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # have created their subiterator and have been iterated against.
            #
            # Another way to look at it is that we're basically flattening lists
            # of lists into a single list, but with generators
SCREAMING_SNAKE_CASE_: int = self.infer(next(self.iterator) , **self.params)
SCREAMING_SNAKE_CASE_: Union[str, Any] = next(self.subiterator)
return processed
class __lowercase ( UpperCAmelCase__ ):
"""simple docstring"""
def __iter__( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[Any] = iter(self.loader)
return self
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: int = False
SCREAMING_SNAKE_CASE_: List[str] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE_: Tuple = self.loader_batch_item()
SCREAMING_SNAKE_CASE_: Union[str, Any] = item.pop("is_last")
accumulator.append(_SCREAMING_SNAKE_CASE)
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE_: Optional[int] = self.infer(next(self.iterator) , **self.params)
if self.loader_batch_size is not None:
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor):
SCREAMING_SNAKE_CASE_: Any = processed
else:
SCREAMING_SNAKE_CASE_: List[Any] = list(processed.keys())[0]
SCREAMING_SNAKE_CASE_: int = processed[key]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
SCREAMING_SNAKE_CASE_: Dict = len(_SCREAMING_SNAKE_CASE)
else:
SCREAMING_SNAKE_CASE_: List[str] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE_: Union[str, Any] = observed_batch_size
SCREAMING_SNAKE_CASE_: Any = processed
SCREAMING_SNAKE_CASE_: int = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE_: Optional[Any] = self.loader_batch_item()
SCREAMING_SNAKE_CASE_: Any = item.pop("is_last")
accumulator.append(_SCREAMING_SNAKE_CASE)
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] = processed
SCREAMING_SNAKE_CASE_: Any = item.pop("is_last")
accumulator.append(_SCREAMING_SNAKE_CASE)
return accumulator
class __lowercase ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any]):
SCREAMING_SNAKE_CASE_: Tuple = dataset
SCREAMING_SNAKE_CASE_: Dict = key
def __len__( self : Optional[Any]):
return len(self.dataset)
def __getitem__( self : Any , lowerCAmelCase__ : Any):
return self.dataset[i][self.key]
class __lowercase ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any]):
SCREAMING_SNAKE_CASE_: Dict = dataset
SCREAMING_SNAKE_CASE_: Dict = keya
SCREAMING_SNAKE_CASE_: List[Any] = keya
def __len__( self : Optional[int]):
return len(self.dataset)
def __getitem__( self : Tuple , lowerCAmelCase__ : Optional[int]):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 360 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Dict = StableDiffusionInpaintPipeline
_UpperCAmelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_UpperCAmelCase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_UpperCAmelCase : Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_UpperCAmelCase : Optional[int] = frozenset([] )
def _SCREAMING_SNAKE_CASE ( self : int):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
SCREAMING_SNAKE_CASE_: Optional[int] = CLIPTextModel(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
SCREAMING_SNAKE_CASE_: List[str] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int]=0):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
SCREAMING_SNAKE_CASE_: Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = image.cpu().permute(0 , 2 , 3 , 1)[0]
        SCREAMING_SNAKE_CASE_: Tuple = Image.fromarray(np.uint8(lowerCAmelCase__)).convert("RGB").resize((64, 64))
        SCREAMING_SNAKE_CASE_: List[str] = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
if str(lowerCAmelCase__).startswith("mps"):
SCREAMING_SNAKE_CASE_: Tuple = torch.manual_seed(lowerCAmelCase__)
else:
SCREAMING_SNAKE_CASE_: Any = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_: int = self.get_dummy_components()
SCREAMING_SNAKE_CASE_: int = StableDiffusionInpaintPipeline(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = self.get_dummy_inputs(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = sd_pipe(**lowerCAmelCase__).images
SCREAMING_SNAKE_CASE_: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_: Tuple = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def _SCREAMING_SNAKE_CASE ( self : List[str]):
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : str):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png")
SCREAMING_SNAKE_CASE_: int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
SCREAMING_SNAKE_CASE_: Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy")
SCREAMING_SNAKE_CASE_: List[str] = "stabilityai/stable-diffusion-2-inpainting"
SCREAMING_SNAKE_CASE_: Any = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__ , safety_checker=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_: str = "Face of a yellow cat, high resolution, sitting on a park bench"
SCREAMING_SNAKE_CASE_: Optional[int] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Optional[int] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="np" , )
SCREAMING_SNAKE_CASE_: Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9E-3
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png")
SCREAMING_SNAKE_CASE_: List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
SCREAMING_SNAKE_CASE_: Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy")
SCREAMING_SNAKE_CASE_: str = "stabilityai/stable-diffusion-2-inpainting"
SCREAMING_SNAKE_CASE_: Dict = StableDiffusionInpaintPipeline.from_pretrained(
            lowerCAmelCase__ , torch_dtype=torch.float16 , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_: List[str] = "Face of a yellow cat, high resolution, sitting on a park bench"
SCREAMING_SNAKE_CASE_: Tuple = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Dict = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="np" , )
SCREAMING_SNAKE_CASE_: Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5E-1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE_: Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png")
SCREAMING_SNAKE_CASE_: Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
SCREAMING_SNAKE_CASE_: List[str] = "stabilityai/stable-diffusion-2-inpainting"
SCREAMING_SNAKE_CASE_: Tuple = PNDMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="scheduler")
SCREAMING_SNAKE_CASE_: Any = StableDiffusionInpaintPipeline.from_pretrained(
            lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , torch_dtype=torch.float16 , )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_: Any = "Face of a yellow cat, high resolution, sitting on a park bench"
SCREAMING_SNAKE_CASE_: Any = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Union[str, Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
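# For reference, a minimal sketch of how the pipeline under test is driven
# outside unittest; the checkpoint, prompt, and fixture URLs are the same ones
# used in the slow tests above.
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
result = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
)
result.images[0].save("inpainted.png")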
| 127 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = ["MaskFormerFeatureExtractor"]
A__ : Dict = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
    _import_structure["modeling_maskformer_swin"] = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
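# Downstream effect, as a sketch: `from transformers.models.maskformer import
# MaskFormerConfig` stays cheap at import time, because _LazyModule defers the
# heavy torch-backed submodule imports until a name is first accessed.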
| 185 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def _lowercase ( __lowerCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[Any] = tf.train.list_variables(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = {}
SCREAMING_SNAKE_CASE__ : Any = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__lowerCAmelCase , desc="""converting tf checkpoint to dict""" ):
SCREAMING_SNAKE_CASE__ : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE__ : str = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
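# Example invocation, assuming this script is saved as
# convert_pegasus_tf_to_pytorch.py (an illustrative file name) and the default
# aeslc checkpoint is on disk; "pegasus/aeslc" is where the converted weights land:
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000
# or, from Python:
#   convert_pegasus_ckpt_to_pytorch("./ckpt/aeslc/model.ckpt-32000", "pegasus/aeslc")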
| 132 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
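# A quick sanity sketch of the configuration above: with the defaults,
# hidden_size is embed_dim * 2**(num_stages - 1) = 96 * 2**3 = 768.
config = DonutSwinConfig()
assert config.hidden_size == 768
assert config.num_layers == len(config.depths) == 4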
| 371 |
import requests
giphy_api_key = 'YOUR API KEY'


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    formatted_query = '+'.join(query.split())
    url = f'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
    gifs = requests.get(url).json()['data']
    return [gif['url'] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 141 | 0 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 16 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present(transformers_path) -> bool:
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('''transformers''')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 295 | 0 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports batched inputs: dicts, lists of dicts, generators, datasets
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 61 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = BlipImageProcessor()
UpperCAmelCase__ = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
UpperCAmelCase__ = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
UpperCAmelCase__ = InstructBlipProcessor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **_UpperCAmelCase : Tuple ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).tokenizer
def SCREAMING_SNAKE_CASE__ ( self : List[str] , **_UpperCAmelCase : List[str] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **_UpperCAmelCase : Dict ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).qformer_tokenizer
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
        UpperCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
UpperCAmelCase__ = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCAmelCase__ = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
UpperCAmelCase__ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_qformer_tokenizer()
UpperCAmelCase__ = InstructBlipProcessor(
tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase , qformer_tokenizer=_UpperCAmelCase )
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = image_processor(_UpperCAmelCase , return_tensors="""np""" )
UpperCAmelCase__ = processor(images=_UpperCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_qformer_tokenizer()
UpperCAmelCase__ = InstructBlipProcessor(
tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase , qformer_tokenizer=_UpperCAmelCase )
UpperCAmelCase__ = """lower newer"""
UpperCAmelCase__ = processor(text=_UpperCAmelCase )
UpperCAmelCase__ = tokenizer(_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
UpperCAmelCase__ = qformer_tokenizer(_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_qformer_tokenizer()
UpperCAmelCase__ = InstructBlipProcessor(
tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase , qformer_tokenizer=_UpperCAmelCase )
UpperCAmelCase__ = """lower newer"""
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_qformer_tokenizer()
UpperCAmelCase__ = InstructBlipProcessor(
tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase , qformer_tokenizer=_UpperCAmelCase )
UpperCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase__ = processor.batch_decode(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_qformer_tokenizer()
UpperCAmelCase__ = InstructBlipProcessor(
tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase , qformer_tokenizer=_UpperCAmelCase )
UpperCAmelCase__ = """lower newer"""
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 61 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value=None) -> None:
        self.value = value
        self.prior = random()
        self.left = None
        self.right = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
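# A short demo of the treap API above: node priorities are random, but the
# inorder traversal of any treap is always sorted, so the output is deterministic.
def demo() -> None:
    root = None
    for x in [5, 3, 9, 1]:
        root = insert(root, x)
    inorder(root)  # prints 1,3,5,9,
    root = erase(root, 3)
    inorder(root)  # prints 1,5,9,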
| 321 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly matches the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
return score
| 321 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : List[Any] =["flax"]
def __init__( self : Union[str, Any] , *a : Optional[int] , **a : Dict ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : Optional[Any] , **a : str ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , *a : Optional[Any] , **a : int ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Any =["flax"]
def __init__( self : Dict , *a : Dict , **a : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : str , **a : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Any , *a : Tuple , **a : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : int =["flax"]
def __init__( self : Optional[Any] , *a : List[str] , **a : int ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : List[str] , **a : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , *a : List[str] , **a : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Dict =["flax"]
def __init__( self : Optional[int] , *a : Union[str, Any] , **a : str ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *a : Any , **a : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : Optional[Any] , **a : Any ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Optional[Any] =["flax"]
def __init__( self : str , *a : Optional[int] , **a : Dict ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : Dict , **a : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *a : Any , **a : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Union[str, Any] =["flax"]
def __init__( self : Optional[int] , *a : str , **a : int ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : int , **a : str ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : str , **a : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Tuple =["flax"]
def __init__( self : Optional[int] , *a : int , **a : List[Any] ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : str , *a : Optional[int] , **a : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *a : Optional[int] , **a : str ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Optional[int] =["flax"]
def __init__( self : Union[str, Any] , *a : Union[str, Any] , **a : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *a : Optional[int] , **a : Dict ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *a : List[Any] , **a : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : List[str] =["flax"]
def __init__( self : Tuple , *a : Dict , **a : int ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *a : Optional[int] , **a : Any ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *a : Union[str, Any] , **a : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : str =["flax"]
def __init__( self : int , *a : Any , **a : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *a : List[Any] , **a : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , *a : str , **a : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : List[str] =["flax"]
def __init__( self : Tuple , *a : str , **a : Any ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *a : Optional[Any] , **a : int ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : str , *a : Tuple , **a : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Any =["flax"]
def __init__( self : str , *a : Union[str, Any] , **a : str ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *a : Optional[int] , **a : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *a : Optional[int] , **a : int ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class a__ ( metaclass=UpperCAmelCase__ ):
lowerCamelCase : Any =["flax"]
def __init__( self : Union[str, Any] , *a : Any , **a : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *a : Dict , **a : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , *a : str , **a : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
| 237 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
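# For context, a sketch of how these criteria are used outside the tests: the
# classes are already imported at the top of this file, and generation stops as
# soon as any criterion in the list fires.
example_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
# model.generate(input_ids, stopping_criteria=example_criteria)  # model/input_ids assumed to exist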
| 237 | 1 |
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def lowerCAmelCase_ ( __a , __a ) -> Dict:
"""simple docstring"""
with open(__a ) as f:
lowerCamelCase__: Dict =f.read(1 )
lowerCamelCase__: str =start_node
lowerCamelCase__: List[Any] =[]
lowerCamelCase__: Optional[int] =start_node
lowerCamelCase__: Dict =0
while visiting not in first_solution:
lowerCamelCase__: int =10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__a ) and k[0] not in first_solution:
lowerCamelCase__: str =k[1]
lowerCamelCase__: List[str] =k[0]
first_solution.append(__a )
lowerCamelCase__: str =distance_of_first_solution + int(__a )
lowerCamelCase__: List[str] =best_node
first_solution.append(__a )
lowerCamelCase__: Union[str, Any] =0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
lowerCamelCase__: List[str] =(
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def lowerCAmelCase_ ( __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Optional[int] =[]
for n in solution[1:-1]:
lowerCamelCase__: Any =solution.index(__a )
for kn in solution[1:-1]:
lowerCamelCase__: Tuple =solution.index(__a )
if n == kn:
continue
lowerCamelCase__: Optional[int] =copy.deepcopy(__a )
lowerCamelCase__: Tuple =kn
lowerCamelCase__: Dict =n
lowerCamelCase__: List[Any] =0
for k in _tmp[:-1]:
lowerCamelCase__: Tuple =_tmp[_tmp.index(__a ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowerCamelCase__: Dict =distance + int(i[1] )
_tmp.append(__a )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowerCamelCase__: Dict =len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __a : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =1
lowerCamelCase__: Tuple =first_solution
lowerCamelCase__: List[str] =[]
lowerCamelCase__: Union[str, Any] =distance_of_first_solution
lowerCamelCase__: Optional[Any] =solution
while count <= iters:
lowerCamelCase__: Union[str, Any] =find_neighborhood(__a , __a )
lowerCamelCase__: List[str] =0
lowerCamelCase__: str =neighborhood[index_of_best_solution]
lowerCamelCase__: Union[str, Any] =len(__a ) - 1
lowerCamelCase__: Any =False
while not found:
lowerCamelCase__: List[str] =0
while i < len(__a ):
if best_solution[i] != solution[i]:
lowerCamelCase__: Tuple =best_solution[i]
lowerCamelCase__: Union[str, Any] =solution[i]
break
lowerCamelCase__: str =i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowerCamelCase__: Dict =True
lowerCamelCase__: str =best_solution[:-1]
lowerCamelCase__: Dict =neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowerCamelCase__: Dict =cost
lowerCamelCase__: Union[str, Any] =solution
else:
lowerCamelCase__: Any =index_of_best_solution + 1
lowerCamelCase__: Any =neighborhood[index_of_best_solution]
if len(__a ) >= size:
tabu_list.pop(0 )
lowerCamelCase__: Dict =count + 1
return best_solution_ever, best_cost
def lowerCAmelCase_ ( __a=None ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =generate_neighbours(args.File )
lowerCamelCase__ , lowerCamelCase__: Tuple =generate_first_solution(
args.File , __a )
lowerCamelCase__ , lowerCamelCase__: Dict =tabu_search(
__a , __a , __a , args.Iterations , args.Size , )
print(F"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
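# Expected input format (inferred from generate_neighbours above): one weighted edge per
# line, "<node_a> <node_b> <distance>", e.g.
#
#   a b 20
#   a c 18
#   b c 10
#
# Example invocation (file name illustrative):
#   python tabu_search.py -f tabu_data.txt -i 4 -s 3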
| 10 |
class CircularQueue:
    """Fixed-size queue implemented on top of a Python list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
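# Usage sketch:
queue = CircularQueue(3)
queue.enqueue(1).enqueue(2)  # enqueue returns self, so calls chain
assert len(queue) == 2
assert queue.first() == 1
assert queue.dequeue() == 1  # the freed slot is reused once rear wraps around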
| 10 | 1 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
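# Worked example for normalize_box (values illustrative): a (10, 20, 30, 40) pixel box on
# a 200x100 image lands on the fixed 0-1000 coordinate grid as follows.
assert normalize_box([10, 20, 30, 40], 200, 100) == [50, 200, 150, 400]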
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
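# Usage sketch (commented out; needs pytesseract plus a real document image, and the
# file name is illustrative):
# from PIL import Image
# processor = LayoutLMv2ImageProcessor()  # apply_ocr=True by default
# encoding = processor(Image.open("form.png").convert("RGB"), return_tensors="pt")
# encoding["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224]), channels flipped to BGR
# encoding["words"], encoding["boxes"]  # OCR words and 0-1000-normalized boxes per image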
| 359 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase_ ( self : List[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
snake_case_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
snake_case_ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
snake_case_ : Any = CLIPTextModel(_A )
snake_case_ : Any = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
snake_case_ : Dict = 77
snake_case_ : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCAmelCase_ ( self : int , _A : Optional[int] , _A : int=0 ) -> Dict:
"""simple docstring"""
if str(_A ).startswith('mps' ):
snake_case_ : Union[str, Any] = torch.manual_seed(_A )
else:
snake_case_ : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A )
snake_case_ : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCAmelCase_ ( self : Dict ) -> Any:
"""simple docstring"""
snake_case_ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ : Any = self.get_dummy_components()
torch.manual_seed(0 )
snake_case_ : Any = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ : Optional[Any] = RobertaSeriesModelWithTransformation(_A )
snake_case_ : Optional[Any] = text_encoder
snake_case_ : Optional[Any] = AltDiffusionPipeline(**_A )
snake_case_ : List[Any] = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
snake_case_ : Optional[Any] = self.get_dummy_inputs(_A )
snake_case_ : int = 'A photo of an astronaut'
snake_case_ : Tuple = alt_pipe(**_A )
snake_case_ : Any = output.images
snake_case_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ : Any = np.array(
[0.5_7_4_8_1_6_2, 0.6_0_4_4_7_1_4_5, 0.4_8_8_2_1_2_1_7, 0.5_0_1_0_0_6_3_6, 0.5_4_3_1_1_8_5, 0.4_5_7_6_3_6_8_3, 0.4_9_6_5_7_6_9_6, 0.4_8_1_3_2_7_3_3, 0.4_7_5_7_3_0_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ : Any = self.get_dummy_components()
snake_case_ : List[str] = PNDMScheduler(skip_prk_steps=_A )
torch.manual_seed(0 )
snake_case_ : Optional[int] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ : Tuple = RobertaSeriesModelWithTransformation(_A )
snake_case_ : Any = text_encoder
snake_case_ : Tuple = AltDiffusionPipeline(**_A )
snake_case_ : Dict = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
snake_case_ : Dict = self.get_dummy_inputs(_A )
snake_case_ : Tuple = alt_pipe(**_A )
snake_case_ : int = output.images
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ : Optional[int] = np.array(
[0.5_1_6_0_5_0_9_3, 0.5_7_0_7_2_4_1, 0.4_7_3_6_5_5_0_7, 0.5_0_5_7_8_8_8_6, 0.5_6_3_3_8_7_7, 0.4_6_4_2_5_0_3, 0.5_1_8_2_0_8_1, 0.4_8_7_6_3_4_8_4, 0.4_9_0_8_4_2_3_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : int ) -> List[str]:
"""simple docstring"""
snake_case_ : Optional[int] = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=_A )
snake_case_ : Optional[int] = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
snake_case_ : str = 'A painting of a squirrel eating a burger'
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : str = alt_pipe([prompt] , generator=_A , guidance_scale=6.0 , num_inference_steps=20 , output_type='np' )
snake_case_ : Any = output.images
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ : Union[str, Any] = np.array([0.1_0_1_0, 0.0_8_0_0, 0.0_7_9_4, 0.0_8_8_5, 0.0_8_4_3, 0.0_7_6_2, 0.0_7_6_9, 0.0_7_2_9, 0.0_5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' )
snake_case_ : Union[str, Any] = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=_A , safety_checker=_A )
snake_case_ : List[str] = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
snake_case_ : List[Any] = 'A painting of a squirrel eating a burger'
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : List[Any] = alt_pipe([prompt] , generator=_A , num_inference_steps=2 , output_type='numpy' )
snake_case_ : Any = output.images
snake_case_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ : List[Any] = np.array([0.4_0_1_9, 0.4_0_5_2, 0.3_8_1_0, 0.4_1_1_9, 0.3_9_1_6, 0.3_9_8_2, 0.4_6_5_1, 0.4_1_9_5, 0.5_3_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
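# Minimal inference sketch for the pipeline exercised above (commented out; downloads the
# BAAI/AltDiffusion weights and mirrors the slow integration test):
# import torch
# from diffusers import AltDiffusionPipeline
# pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None).to("cuda")
# image = pipe(
#     "A painting of a squirrel eating a burger",
#     generator=torch.manual_seed(0),
#     num_inference_steps=20,
# ).images[0]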
| 88 | 0 |
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
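# Usage sketch for the re-exported feature types (dataset content illustrative):
# from datasets import Dataset, Features, Value, ClassLabel
# features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
# ds = Dataset.from_dict({"text": ["good", "bad"], "label": [1, 0]}, features=features)
# ds.features["label"].int2str(1)  # -> "pos"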
| 11 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
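# Usage sketch: a randomly initialized ViT MAE model from this config, following the
# standard transformers config/model pattern:
# from transformers import ViTMAEConfig, ViTMAEModel
# config = ViTMAEConfig(mask_ratio=0.75)
# model = ViTMAEModel(config)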
| 11 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = 1 / 2_55 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ : Any = size if size is not None else {"""height""": 3_84, """width""": 3_84}
lowercase_ : Any = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
lowercase_ : List[str] = do_resize
lowercase_ : Optional[int] = size
lowercase_ : List[str] = resample
lowercase_ : str = do_rescale
lowercase_ : Any = rescale_factor
lowercase_ : Tuple = do_normalize
lowercase_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase_ : str = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase_ : Any = do_convert_rgb
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
lowercase_ : Any = (size["""height"""], size["""width"""])
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Any = do_resize if do_resize is not None else self.do_resize
lowercase_ : str = resample if resample is not None else self.resample
lowercase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Any = image_mean if image_mean is not None else self.image_mean
lowercase_ : Any = image_std if image_std is not None else self.image_std
lowercase_ : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase_ : Optional[int] = size if size is not None else self.size
lowercase_ : Any = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
lowercase_ : List[str] = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase_ : Optional[Any] = [convert_to_rgb(SCREAMING_SNAKE_CASE_ ) for image in images]
# All transformations expect numpy arrays.
lowercase_ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
lowercase_ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
lowercase_ : str = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
lowercase_ : int = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
lowercase_ : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
lowercase_ : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=SCREAMING_SNAKE_CASE_ )
return encoded_outputs
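# Usage sketch (commented out; assumes the class above is BLIP's image processor with
# its default 384x384 size, and the file name is illustrative):
# from PIL import Image
# processor = BlipImageProcessor()
# batch = processor(Image.open("photo.jpg"), return_tensors="pt")
# batch["pixel_values"].shape  # -> torch.Size([1, 3, 384, 384])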
| 355 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
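# Consumer-side effect of the lazy-module pattern above (sketch): symbols resolve on
# first attribute access, so importing the config does not pull in torch-backed modules.
# from transformers.models.vit_msn import ViTMSNConfig  # cheap
# config = ViTMSNConfig()  # modeling code (and torch) still untouched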
| 264 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
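# Usage sketch for one of the re-exported collators (checkpoint name illustrative):
# from transformers import AutoTokenizer, DataCollatorWithPadding
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# collator = DataCollatorWithPadding(tokenizer=tokenizer)
# batch = collator([tokenizer("short"), tokenizer("a somewhat longer sentence")])
# batch["input_ids"].shape  # both rows padded to the longer sequence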
| 82 |
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    path = insecure_tar_files[insecure_tar_file]
    extracted_path = tmp_path / "extracted"
    TarExtractor.extract(path, extracted_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
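# Sketch of the API under test (paths illustrative; `infer_extractor_format` reads the
# file's magic number, so the archive must exist on disk):
# from datasets.utils.extract import Extractor
# fmt = Extractor.infer_extractor_format("archive.tar.gz")
# Extractor.extract("archive.tar.gz", "out_dir", fmt)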
| 30 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
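# Hypothetical concrete reader for illustration (the real ones live in
# datasets.io.csv / .json / .parquet):
# class JsonlReader(AbstractDatasetReader):
#     def read(self):
#         import json
#         with open(self.path_or_paths, encoding="utf-8") as f:
#             rows = [json.loads(line) for line in f]
#         return Dataset.from_dict({key: [row[key] for row in rows] for key in rows[0]})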
| 365 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(
            residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta
        )

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
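# Typical consumer-side denoising loop for the scheduler under test (sketch; `model` is
# any epsilon-predicting UNet and `sample` starts as Gaussian noise):
# scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(50)
# for t in scheduler.timesteps:
#     noise_pred = model(sample, t)
#     sample = scheduler.step(noise_pred, t, sample).prev_sample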
| 158 | 0 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
print(F'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(F'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(F'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(F'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(F'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(F'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(F'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(F'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
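# Quick self-checks (both variants agree, and gcd(a, 0) == a):
assert euclidean_gcd(48, 18) == 6 == euclidean_gcd_recursive(48, 18)
assert euclidean_gcd(7, 0) == 7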
| 63 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class A_ :
'''simple docstring'''
def __init__( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]=2 , lowercase_ : str=True , lowercase_ : Optional[int]=False , lowercase_ : List[str]=10 , lowercase_ : Optional[Any]=3 , lowercase_ : List[str]=32 * 4 , lowercase_ : str=32 * 6 , lowercase_ : List[Any]=4 , lowercase_ : List[Any]=32 , ) -> Optional[int]:
UpperCAmelCase : List[str] = parent
UpperCAmelCase : int = batch_size
UpperCAmelCase : int = is_training
UpperCAmelCase : int = use_auxiliary_loss
UpperCAmelCase : List[Any] = num_queries
UpperCAmelCase : List[str] = num_channels
UpperCAmelCase : List[str] = min_size
UpperCAmelCase : Dict = max_size
UpperCAmelCase : Tuple = num_labels
UpperCAmelCase : str = mask_feature_size
def UpperCAmelCase_ ( self : int ) -> int:
UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowercase_ )
UpperCAmelCase : Tuple = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowercase_ )
UpperCAmelCase : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowercase_ ) > 0.5
).float()
UpperCAmelCase : Optional[Any] = (torch.rand((self.batch_size, self.num_labels) , device=lowercase_ ) > 0.5).long()
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self : Dict ) -> Dict:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase : Optional[Any] = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> int:
UpperCAmelCase : int = output.encoder_hidden_states
UpperCAmelCase : Any = output.pixel_decoder_hidden_states
UpperCAmelCase : int = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowercase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowercase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowercase_ ) , config.decoder_config.decoder_layers )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Dict=False ) -> Tuple:
with torch.no_grad():
UpperCAmelCase : str = MaskFormerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase : List[str] = model(pixel_values=lowercase_ , pixel_mask=lowercase_ )
UpperCAmelCase : Union[str, Any] = model(lowercase_ , output_hidden_states=lowercase_ )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : str ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = MaskFormerForInstanceSegmentation(config=lowercase_ )
model.to(lowercase_ )
model.eval()
def comm_check_on_output(lowercase_ : Union[str, Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(pixel_values=lowercase_ , pixel_mask=lowercase_ )
UpperCAmelCase : Dict = model(lowercase_ )
comm_check_on_output(lowercase_ )
UpperCAmelCase : Any = model(
pixel_values=lowercase_ , pixel_mask=lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ )
comm_check_on_output(lowercase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCAmelCase_ : Optional[Any] = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCAmelCase_ : int = False
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : Tuple = False
def UpperCAmelCase_ ( self : Any ) -> int:
UpperCAmelCase : Optional[Any] = MaskFormerModelTester(self )
UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Any:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowercase_ )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def UpperCAmelCase_ ( self : str ) -> List[str]:
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase_ ( self : int ) -> List[Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = model_class(lowercase_ )
UpperCAmelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
UpperCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_ )
@slow
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCAmelCase : Tuple = MaskFormerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
UpperCAmelCase : Optional[Any] = (self.model_tester.min_size,) * 2
UpperCAmelCase : str = {
'pixel_values': torch.randn((2, 3, *size) , device=lowercase_ ),
'mask_labels': torch.randn((2, 10, *size) , device=lowercase_ ),
'class_labels': torch.zeros(2 , 10 , device=lowercase_ ).long(),
}
UpperCAmelCase : List[str] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowercase_ )
UpperCAmelCase : Optional[int] = model(**lowercase_ )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase_ ( self : Dict ) -> str:
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(lowercase_ ).to(lowercase_ )
UpperCAmelCase : List[Any] = model(**lowercase_ , output_attentions=lowercase_ )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase_ ( self : Dict ) -> str:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCAmelCase : Dict = self.all_model_classes[1]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase : Any = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
UpperCAmelCase : Tuple = model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ ).loss
loss.backward()
def UpperCAmelCase_ ( self : List[str] ) -> str:
# only MaskFormerForInstanceSegmentation has the loss
UpperCAmelCase : Optional[int] = self.all_model_classes[1]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase : List[str] = True
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : List[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
UpperCAmelCase : List[str] = model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase : Optional[int] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
UpperCAmelCase : Any = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase : Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowercase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowercase__ = 1e-4
def UpperCamelCase( ):
UpperCAmelCase : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
UpperCAmelCase : List[Any] = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(lowercase_ )
UpperCAmelCase : Dict = self.default_image_processor
UpperCAmelCase : List[str] = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(lowercase_ , return_tensors='pt' ).to(lowercase_ )
UpperCAmelCase : Optional[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1_088) )
with torch.no_grad():
UpperCAmelCase : List[Any] = model(**lowercase_ )
UpperCAmelCase : str = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
UpperCAmelCase : Tuple = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
UpperCAmelCase : Tuple = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
def UpperCAmelCase_ ( self : List[str] ) -> int:
UpperCAmelCase : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(lowercase_ )
.eval()
)
UpperCAmelCase : int = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : List[Any] = image_processor(lowercase_ , return_tensors='pt' ).to(lowercase_ )
UpperCAmelCase : Union[str, Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1_088) )
with torch.no_grad():
UpperCAmelCase : Tuple = model(**lowercase_ )
# masks_queries_logits
UpperCAmelCase : Tuple = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase : Optional[int] = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
UpperCAmelCase : str = torch.tensor(lowercase_ ).to(lowercase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
# class_queries_logits
UpperCAmelCase : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase : Optional[Any] = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(lowercase_ )
.eval()
)
UpperCAmelCase : str = self.default_image_processor
UpperCAmelCase : str = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(lowercase_ , return_tensors='pt' ).to(lowercase_ )
UpperCAmelCase : str = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1_088) )
with torch.no_grad():
UpperCAmelCase : Tuple = model(**lowercase_ )
# masks_queries_logits
UpperCAmelCase : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase : int = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
UpperCAmelCase : str = torch.tensor(lowercase_ ).to(lowercase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
# class_queries_logits
UpperCAmelCase : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase : Dict = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
def UpperCAmelCase_ ( self : Any ) -> Dict:
UpperCAmelCase : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(lowercase_ )
.eval()
)
UpperCAmelCase : Union[str, Any] = self.default_image_processor
UpperCAmelCase : Optional[int] = image_processor(
            [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='pt' , )
UpperCAmelCase : Optional[int] = inputs['pixel_values'].to(lowercase_ )
UpperCAmelCase : Optional[Any] = [el.to(lowercase_ ) for el in inputs['mask_labels']]
UpperCAmelCase : List[str] = [el.to(lowercase_ ) for el in inputs['class_labels']]
with torch.no_grad():
UpperCAmelCase : Tuple = model(**lowercase_ )
self.assertTrue(outputs.loss is not None )
| 151 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = 42
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self : Union[str, Any] , lowercase_ : int = 65536 , lowercase_ : Optional[int] = None , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 0 , lowercase_ : str = "fourier" , lowercase_ : bool = True , lowercase_ : bool = False , lowercase_ : float = 0.0 , lowercase_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowercase_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowercase_ : Tuple[str] = "UNetMidBlock1D" , lowercase_ : str = None , lowercase_ : Tuple[int] = (32, 32, 64) , lowercase_ : str = None , lowercase_ : int = 8 , lowercase_ : int = 1 , lowercase_ : bool = False , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : List[str] = sample_size
        # 1. time
if time_embedding_type == "fourier":
SCREAMING_SNAKE_CASE_ : str = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowercase_ , log=lowercase_ , flip_sin_to_cos=lowercase_)
SCREAMING_SNAKE_CASE_ : int = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowercase_ , downscale_freq_shift=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = block_out_channels[0]
if use_timestep_embedding:
SCREAMING_SNAKE_CASE_ : Dict = block_out_channels[0] * 4
SCREAMING_SNAKE_CASE_ : Dict = TimestepEmbedding(
in_channels=lowercase_ , time_embed_dim=lowercase_ , act_fn=lowercase_ , out_dim=block_out_channels[0] , )
SCREAMING_SNAKE_CASE_ : int = nn.ModuleList([])
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.ModuleList([])
SCREAMING_SNAKE_CASE_ : List[str] = None
# down
SCREAMING_SNAKE_CASE_ : Tuple = in_channels
for i, down_block_type in enumerate(lowercase_):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output_channel
SCREAMING_SNAKE_CASE_ : Tuple = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
SCREAMING_SNAKE_CASE_ : List[str] = i == len(lowercase_) - 1
SCREAMING_SNAKE_CASE_ : Any = get_down_block(
lowercase_ , num_layers=lowercase_ , in_channels=lowercase_ , out_channels=lowercase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowercase_)
# mid
SCREAMING_SNAKE_CASE_ : Dict = get_mid_block(
lowercase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowercase_ , add_downsample=lowercase_ , )
# up
SCREAMING_SNAKE_CASE_ : Optional[int] = list(reversed(lowercase_))
SCREAMING_SNAKE_CASE_ : List[Any] = reversed_block_out_channels[0]
if out_block_type is None:
SCREAMING_SNAKE_CASE_ : Any = out_channels
else:
SCREAMING_SNAKE_CASE_ : List[str] = block_out_channels[0]
for i, up_block_type in enumerate(lowercase_):
SCREAMING_SNAKE_CASE_ : Tuple = output_channel
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
reversed_block_out_channels[i + 1] if i < len(lowercase_) - 1 else final_upsample_channels
)
SCREAMING_SNAKE_CASE_ : Dict = i == len(lowercase_) - 1
SCREAMING_SNAKE_CASE_ : Tuple = get_up_block(
lowercase_ , num_layers=lowercase_ , in_channels=lowercase_ , out_channels=lowercase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowercase_)
SCREAMING_SNAKE_CASE_ : int = output_channel
# out
SCREAMING_SNAKE_CASE_ : Dict = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
SCREAMING_SNAKE_CASE_ : str = get_out_block(
out_block_type=lowercase_ , num_groups_out=lowercase_ , embed_dim=block_out_channels[0] , out_channels=lowercase_ , act_fn=lowercase_ , fc_dim=block_out_channels[-1] // 4 , )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : torch.FloatTensor , lowercase_ : Union[torch.Tensor, float, int] , lowercase_ : bool = True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = timestep
if not torch.is_tensor(lowercase_):
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
elif torch.is_tensor(lowercase_) and len(timesteps.shape) == 0:
SCREAMING_SNAKE_CASE_ : str = timesteps[None].to(sample.device)
SCREAMING_SNAKE_CASE_ : Dict = self.time_proj(lowercase_)
if self.config.use_timestep_embedding:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.time_mlp(lowercase_)
else:
SCREAMING_SNAKE_CASE_ : int = timestep_embed[..., None]
SCREAMING_SNAKE_CASE_ : int = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
SCREAMING_SNAKE_CASE_ : Dict = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ()
for downsample_block in self.down_blocks:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = downsample_block(hidden_states=lowercase_ , temb=lowercase_)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
SCREAMING_SNAKE_CASE_ : Dict = self.mid_block(lowercase_ , lowercase_)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
SCREAMING_SNAKE_CASE_ : List[str] = down_block_res_samples[-1:]
SCREAMING_SNAKE_CASE_ : int = down_block_res_samples[:-1]
SCREAMING_SNAKE_CASE_ : Optional[int] = upsample_block(lowercase_ , res_hidden_states_tuple=lowercase_ , temb=lowercase_)
# 5. post-process
if self.out_block:
SCREAMING_SNAKE_CASE_ : str = self.out_block(lowercase_ , lowercase_)
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowercase_)
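# A hypothetical smoke test for the model above (a sketch, not from the source;
# the class name `UNet1DModel` and the `.sample` output attribute follow
# diffusers' public API and are assumptions here, since the identifiers in this
# file are obfuscated):
#
# net = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
# out = net(torch.randn(1, 2, 65536), timestep=10).sample  # shape (1, 2, 65536)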
| 318 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
SCREAMING_SNAKE_CASE_ : Dict = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , lowercase_)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowercase_ : str):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowercase_ : List[Any]):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowercase_ : str):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
        SCREAMING_SNAKE_CASE_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
SCREAMING_SNAKE_CASE_ : Dict = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
processor_slow.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
processor_fast.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , lowercase_)
self.assertIsInstance(processor_fast.tokenizer , lowercase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , lowercase_)
self.assertIsInstance(processor_fast.image_processor , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''')
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor(do_normalize=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=lowercase_)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Any = image_processor(lowercase_ , return_tensors='''np''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=lowercase_ , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : int = processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.batch_decode(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Dict = processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 318 | 1 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
A : Tuple = logging.getLogger(__name__)
def __lowerCamelCase ( __a :Optional[int] , __a :List[str] ) -> Tuple:
"""simple docstring"""
A__ = np.argmax(__a , axis=1 )
return np.sum(outputs == labels )
def __lowerCamelCase ( __a :Tuple ) -> Dict:
"""simple docstring"""
with open(__a , encoding="""utf_8""" ) as f:
A__ = csv.reader(__a )
A__ = []
next(__a ) # skip the first line
for line in tqdm(__a ):
output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def __lowerCamelCase ( __a :Optional[int] , __a :List[Any] , __a :Dict , __a :Optional[Any] , __a :Optional[Any] , __a :int ) -> Union[str, Any]:
"""simple docstring"""
A__ = []
for dataset in encoded_datasets:
A__ = len(__a )
        A__ = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        A__ = np.zeros((n_batch, 2) , dtype=np.int64 )
        A__ = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.int64 )
        A__ = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, conta, contb, mc_label),
        ) in enumerate(__a ):
            A__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            A__ = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
A__ = with_conta
A__ = with_conta
A__ = len(__a ) - 1
A__ = len(__a ) - 1
A__ = with_conta
A__ = with_conta
A__ = mc_label
A__ = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(__a ) for t in all_inputs ) )
return tensor_datasets
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
A__ = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=__a , default="""openai-gpt""" , help="""pretrained model name""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" )
parser.add_argument(
"""--output_dir""" , default=__a , type=__a , required=__a , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=__a , default="""""" )
parser.add_argument("""--eval_dataset""" , type=__a , default="""""" )
parser.add_argument("""--seed""" , type=__a , default=4_2 )
parser.add_argument("""--num_train_epochs""" , type=__a , default=3 )
parser.add_argument("""--train_batch_size""" , type=__a , default=8 )
parser.add_argument("""--eval_batch_size""" , type=__a , default=1_6 )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__a , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , type=__a , default=1 )
parser.add_argument(
"""--max_steps""" , default=-1 , type=__a , help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
) , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__a , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=__a , default=6.25E-5 )
parser.add_argument("""--warmup_steps""" , default=0 , type=__a , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--lr_schedule""" , type=__a , default="""warmup_linear""" )
parser.add_argument("""--weight_decay""" , type=__a , default=0.01 )
parser.add_argument("""--lm_coef""" , type=__a , default=0.9 )
parser.add_argument("""--n_valid""" , type=__a , default=3_7_4 )
parser.add_argument("""--server_ip""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" )
A__ = parser.parse_args()
print(__a )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__a )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
A__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
A__ = torch.cuda.device_count()
logger.info("""device: {}, n_gpu {}""".format(__a , __a ) )
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
    # These loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
A__ = ["""_start_""", """_delimiter_""", """_classify_"""]
A__ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__a )
A__ = tokenizer.convert_tokens_to_ids(__a )
A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__a ) )
model.to(__a )
# Load and encode the datasets
def tokenize_and_encode(__a :Tuple ):
if isinstance(__a , __a ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__a ) )
elif isinstance(__a , __a ):
return obj
return [tokenize_and_encode(__a ) for o in obj]
logger.info("""Encoding dataset...""" )
A__ = load_rocstories_dataset(args.train_dataset )
A__ = load_rocstories_dataset(args.eval_dataset )
A__ = (train_dataset, eval_dataset)
A__ = tokenize_and_encode(__a )
# Compute the max input length for the Transformer
A__ = model.config.n_positions // 2 - 2
    A__ = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(contb[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, contb, _ in dataset )
A__ = min(__a , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
A__ = pre_process_datasets(__a , __a , __a , *__a )
A__ , A__ = tensor_datasets[0], tensor_datasets[1]
A__ = TensorDataset(*__a )
A__ = RandomSampler(__a )
A__ = DataLoader(__a , sampler=__a , batch_size=args.train_batch_size )
A__ = TensorDataset(*__a )
A__ = SequentialSampler(__a )
A__ = DataLoader(__a , sampler=__a , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
A__ = args.max_steps
A__ = args.max_steps // (len(__a ) // args.gradient_accumulation_steps) + 1
else:
A__ = len(__a ) // args.gradient_accumulation_steps * args.num_train_epochs
A__ = list(model.named_parameters() )
A__ = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
A__ = [
{
"""params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"""weight_decay""": args.weight_decay,
},
{"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0},
]
A__ = AdamW(__a , lr=args.learning_rate , eps=args.adam_epsilon )
A__ = get_linear_schedule_with_warmup(
__a , num_warmup_steps=args.warmup_steps , num_training_steps=__a )
if args.do_train:
A__ , A__ , A__ = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ):
A__ = 0
A__ = 0
A__ = tqdm(__a , desc="""Training""" )
for step, batch in enumerate(__a ):
A__ = tuple(t.to(__a ) for t in batch )
A__ , A__ , A__ , A__ = batch
A__ = model(__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a )
A__ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
A__ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
A__ = """Training loss: {:.2e} lr: {:.2e}""".format(__a , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
A__ = model.module if hasattr(__a , """module""" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
A__ = os.path.join(args.output_dir , __a )
A__ = os.path.join(args.output_dir , __a )
torch.save(model_to_save.state_dict() , __a )
model_to_save.config.to_json_file(__a )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
A__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__a )
if args.do_eval:
model.eval()
A__ , A__ = 0, 0
A__ , A__ = 0, 0
for batch in tqdm(__a , desc="""Evaluating""" ):
A__ = tuple(t.to(__a ) for t in batch )
A__ , A__ , A__ , A__ = batch
with torch.no_grad():
A__ , A__ , A__ , A__ = model(
__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a )
A__ = mc_logits.detach().cpu().numpy()
A__ = mc_labels.to("""cpu""" ).numpy()
A__ = accuracy(__a , __a )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
A__ = eval_loss / nb_eval_steps
A__ = eval_accuracy / nb_eval_examples
A__ = tr_loss / nb_tr_steps if args.do_train else None
A__ = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
A__ = os.path.join(args.output_dir , """eval_results.txt""" )
with open(__a , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , __a , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
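# Hypothetical invocation of this fine-tuning script (the script name and
# dataset paths below are placeholders, not from the source):
#
# python run_openai_gpt.py --model_name openai-gpt --do_train --do_eval \
#     --train_dataset rocstories/train.csv --eval_dataset rocstories/val.csv \
#     --output_dir ./out --train_batch_size 16 --num_train_epochs 3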
| 274 |
from sklearn.metrics import f1_score
import datasets
A : Any = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
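# Worked instance of the formula above (a hedged illustration matching Example 1
# in the docstring below): with references=[0, 1, 0, 1, 0] and
# predictions=[0, 0, 1, 1, 0], the positive class has precision = recall = 0.5,
# so F1 = 2 * (0.5 * 0.5) / (0.5 + 0.5) = 0.5.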
A : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
    Example 4-A multiclass example, with different values for the `average` input.
    >>> f1_metric = datasets.load_metric("f1")
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
A : List[Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A (datasets.Metric ):
'''simple docstring'''
def a_ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
def a_ ( self : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Any="binary" , __lowerCAmelCase : Optional[int]=None ) -> List[Any]:
"""simple docstring"""
        score = f1_score(
            __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase )
        return {"f1": float(score ) if score.size == 1 else score}
| 274 | 1 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def lowerCAmelCase_ ( *__A ) -> List[Any]:
'''simple docstring'''
if not isinstance(__A, __A ):
UpperCAmelCase__ = list(__A )
for i in range(len(__A ) ):
UpperCAmelCase__ = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
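# Hypothetical usage of the helper above (assuming it is exposed as
# `release_memory`, as the equivalent helper in `accelerate.utils` is; the
# name is an assumption since identifiers in this file are obfuscated):
#
# model, optimizer = release_memory(model, optimizer)  # both come back as None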
def lowerCAmelCase_ ( __A ) -> bool:
'''simple docstring'''
UpperCAmelCase__ = [
"CUDA out of memory.", # CUDA OOM
"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
"DefaultCPUAllocator: can't allocate memory", # CPU OOM
]
if isinstance(__A, __A ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
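# Illustrative behavior of the OOM check above (a sketch; the function name
# `should_reduce_batch_size` is an assumption for readability):
#
# should_reduce_batch_size(RuntimeError("CUDA out of memory."))  # -> True
# should_reduce_batch_size(ValueError("bad input"))              # -> False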
def lowerCAmelCase_ ( __A = None, __A = 128 ) -> Tuple:
'''simple docstring'''
if function is None:
return functools.partial(__A, starting_batch_size=__A )
UpperCAmelCase__ = starting_batch_size
def decorator(*__A, **__A ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
UpperCAmelCase__ = list(inspect.signature(__A ).parameters.keys() )
# Guard against user error
if len(__A ) < (len(__A ) + 1):
UpperCAmelCase__ = ", ".join([f"""{arg}={value}""" for arg, value in zip(params[1:], args[1:] )] )
            raise TypeError(
                f"""Batch size was passed into `{function.__name__}` as the first argument when called. """
                f"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
while True:
if batch_size == 0:
raise RuntimeError("No executable batch size found, reached zero." )
try:
return function(__A, *__A, **__A )
except Exception as e:
if should_reduce_batch_size(__A ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
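# Hypothetical usage of the decorator above (assuming it is exposed as
# `find_executable_batch_size`, as in `accelerate.utils`): the wrapped function
# must take the batch size as its first argument, and on an out-of-memory error
# the decorator clears the device cache and retries with batch_size // 2.
#
# @find_executable_batch_size(starting_batch_size=128)
# def training_loop(batch_size, model, dataloader):
#     ...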
| 143 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : List[Any] = 'facebook/bart-large-mnli'
__UpperCAmelCase : Optional[Any] = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
__UpperCAmelCase : Optional[int] = 'text_classifier'
__UpperCAmelCase : int = AutoTokenizer
__UpperCAmelCase : Dict = AutoModelForSequenceClassification
__UpperCAmelCase : int = ['text', ['text']]
__UpperCAmelCase : Optional[int] = ['text']
def lowercase_ (self : List[Any] ) -> List[str]:
"""simple docstring"""
super().setup()
UpperCAmelCase__ = self.model.config
UpperCAmelCase__ = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
UpperCAmelCase__ = int(__UpperCAmelCase )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = labels
return self.pre_processor(
[text] * len(__UpperCAmelCase ) , [f"""This example is {label}""" for label in labels] , return_tensors="pt" , padding="max_length" , )
def lowercase_ (self : Dict , __UpperCAmelCase : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase__ = outputs.logits
UpperCAmelCase__ = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
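# Hypothetical usage sketch of the zero-shot text classification tool above
# (the class name `TextClassificationTool` follows transformers' tools module
# and is an assumption here, since the class in this file is obfuscated):
#
# classifier = TextClassificationTool()
# classifier("This is a super nice API!", labels=["positive", "negative"])
# # -> "positive"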
| 143 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding="""utf-8""" ,check=__UpperCAmelCase ,)
assert hasattr(self ,"""env""" )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
lowerCAmelCase__ : Optional[int] = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
lowerCAmelCase__ : Any = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=__UpperCAmelCase ,instance_count=__UpperCAmelCase ,instance_type=self.instance_type ,debugger_hook_config=__UpperCAmelCase ,hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=__UpperCAmelCase ,py_version="""py36""" ,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
TrainingJobAnalytics(__UpperCAmelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any:
# create estimator
lowerCAmelCase__ : List[Any] = self.create_estimator(__UpperCAmelCase )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase__ : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase__ : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase__ : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase__ : List[str] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,"""w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,__UpperCAmelCase )
| 37 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 | 0 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase_ = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase_ = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def __lowerCamelCase ( a_ : str , a_ : str ) -> tuple[str, float]:
__SCREAMING_SNAKE_CASE :Optional[Any] = len([g for position, g in enumerate(a_ ) if g == main_target[position]] )
return (item, float(a_ ))
def __lowerCamelCase ( a_ : str , a_ : str ) -> tuple[str, str]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = random.randint(0 , len(a_ ) - 1 )
__SCREAMING_SNAKE_CASE :Tuple = parent_a[:random_slice] + parent_a[random_slice:]
__SCREAMING_SNAKE_CASE :int = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
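# Worked instance of single-point crossover as implemented above (hedged: the
# obfuscated names collapse the two parents and the two children into single
# identifiers, so this illustrates the intended behavior): with parents "AAAA"
# and "BBBB" and a random slice index of 2, the children are "AABB" and "BBAA".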
def __lowerCamelCase ( a_ : str , a_ : list[str] ) -> str:
__SCREAMING_SNAKE_CASE :List[Any] = list(a_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
__SCREAMING_SNAKE_CASE :Optional[int] = random.choice(a_ )
return "".join(a_ )
def __lowerCamelCase ( a_ : tuple[str, float] , a_ : list[tuple[str, float]] , a_ : list[str] , ) -> list[str]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = []
# Generate more children proportionally to the fitness score.
__SCREAMING_SNAKE_CASE :Optional[Any] = int(parent_a[1] * 1_00 ) + 1
__SCREAMING_SNAKE_CASE :Union[str, Any] = 10 if child_n >= 10 else child_n
for _ in range(a_ ):
__SCREAMING_SNAKE_CASE :int = population_score[random.randint(0 , a_ )][0]
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :str = crossover(parent_a[0] , a_ )
# Append new string to the population list.
pop.append(mutate(a_ , a_ ) )
pop.append(mutate(a_ , a_ ) )
return pop
def __lowerCamelCase ( a_ : str , a_ : list[str] , a_ : bool = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
__SCREAMING_SNAKE_CASE :Union[str, Any] = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(a_ )
# Verify that the target contains no genes besides the ones inside genes variable.
__SCREAMING_SNAKE_CASE :List[Any] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__SCREAMING_SNAKE_CASE :Union[str, Any] = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(a_ )
# Generate random starting population.
__SCREAMING_SNAKE_CASE :List[str] = []
for _ in range(a_ ):
population.append(''''''.join([random.choice(a_ ) for i in range(len(a_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :int = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(a_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__SCREAMING_SNAKE_CASE :Dict = [evaluate(a_ , a_ ) for item in population]
# Check if there is a matching evolution.
__SCREAMING_SNAKE_CASE :Optional[int] = sorted(a_ , key=lambda a_ : x[1] , reverse=a_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'''\nGeneration: {generation}'''
f'''\nTotal Population:{total_population}'''
f'''\nBest score: {population_score[0][1]}'''
f'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of the evolution.
__SCREAMING_SNAKE_CASE :List[str] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(a_ )
# Normalize population score to be between 0 and 1.
__SCREAMING_SNAKE_CASE :Optional[Any] = [
(item, score / len(a_ )) for item, score in population_score
]
# This is selection
for i in range(a_ ):
population.extend(select(population_score[int(a_ )] , a_ , a_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(a_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase_ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase_ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 239 |
"""simple docstring"""
def __lowerCamelCase ( a_ : int = 10 , a_ : int = 22 ) -> int:
__SCREAMING_SNAKE_CASE :Optional[int] = range(1 , a_ )
__SCREAMING_SNAKE_CASE :List[Any] = range(1 , a_ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
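# Worked instance of the predicate above: 7**5 == 16807 has exactly 5 digits,
# so (base=7, power=5) contributes 1 to the sum.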
if __name__ == "__main__":
    print(f'{solution(1_0, 2_2) = }')
| 239 | 1 |
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( lowercase__ : Dict , lowercase__ : Dict , lowercase__ : str , lowercase__ : Tuple="attention" ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
lowerCAmelCase_ :Union[str, Any] = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
lowerCAmelCase_ :Any = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
lowerCAmelCase_ :Optional[int] = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : int , lowercase__ : Any=False ) -> int:
'''simple docstring'''
if split_mlp_wi:
lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
lowerCAmelCase_ :List[str] = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
lowerCAmelCase_ :Tuple = (wi_a, wi_a)
else:
lowerCAmelCase_ :List[Any] = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""]
lowerCAmelCase_ :Dict = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""]
return wi, wo
def _snake_case ( lowercase__ : Any , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Whether the model is an encoder-only (no decoder) model.", default=False
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
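# A hedged usage sketch for the converter above. The script filename follows the
# usual transformers layout and the paths are hypothetical placeholders:
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /tmp/t5x/checkpoint_1000000 \
#       --config_file /tmp/t5/config.json \
#       --pytorch_dump_path /tmp/t5_pytorch
#
# Or programmatically, with the same hypothetical paths:
#
#   convert_tax_checkpoint_to_pytorch(
#       "/tmp/t5x/checkpoint_1000000", "/tmp/t5/config.json", "/tmp/t5_pytorch", is_encoder_only=False
#   )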
| 84 |
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light c,
# both used by the Casimir force function below
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solves the Casimir force equation F = (pi^2 * hbar * c * A) / (240 * d^4)
    for whichever of the three arguments is passed as 0, given the other two.
    """
if (force, area, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if force < 0:
raise ValueError("""Magnitude of force can not be negative""" )
if distance < 0:
raise ValueError("""Distance can not be negative""" )
if area < 0:
raise ValueError("""Area can not be negative""" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
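# A hedged usage sketch for casimir_force above; the plate area and separation
# are illustrative values only:
#
#   casimir_force(force=0, area=4e-4, distance=1e-6)
#   # -> {"force": ...}  (whichever argument is passed as 0 is solved for)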
| 84 | 1 |
def longest_common_substring(text1: str, text2: str) -> str:
    """
    Finds the longest common substring of two strings with dynamic programming.

    >>> longest_common_substring("abcdef", "xabded")
    'ab'
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
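# A quick illustration of the DP above (inputs chosen arbitrarily):
# dp[i][j] holds the length of the longest common suffix of text1[:i] and text2[:j].
#
#   longest_common_substring("programming", "gaming")  # -> "ming"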
| 361 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a timm ViT/DeiT checkpoint into our transformers structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
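# A hedged usage sketch for the converter above. The script filename follows the
# usual transformers layout and the output path is illustrative:
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path /tmp/vit_base_patch16_224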
| 89 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
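# A hedged usage sketch for the pipeline above, via the high-level factory;
# the image URL is illustrative and the scores/labels are placeholders:
#
#   from transformers import pipeline
#
#   detector = pipeline("object-detection")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # -> [{"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]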
| 304 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
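# A hedged usage sketch of the pipelines exercised by the integration test above;
# the model ids are the Hub repos used in the test, while the prompt and
# init_image (any PIL image) are hypothetical:
#
#   pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#   pipe = KandinskyV22Img2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
#   image_emb, negative_emb = pipe_prior("a cartoon frog").to_tuple()
#   out = pipe(image=init_image, image_embeds=image_emb, negative_image_embeds=negative_emb, strength=0.2)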
| 100 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"
    def __init__(
        self,
        vocab_size=29_056,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 371 |
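# A hedged usage sketch for the MegatronBertConfig defined above (the 345M-style
# values are illustrative, not a recommendation):
#
#   config = MegatronBertConfig(vocab_size=29_056, hidden_size=1_024)
#   config.num_hidden_layers  # -> 24, from the defaults above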
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50_000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
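# A hedged sketch of running the slow integration test above; the test-file path
# is an assumption based on the usual transformers layout:
#
#   RUN_SLOW=1 python -m pytest tests/models/roformer/test_modeling_flax_roformer.py -k inference_masked_lm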
| 331 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a [`DetrConfig`] (or a derived class) from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
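# A hedged usage sketch for the DetrConfig above (values are illustrative):
#
#   config = DetrConfig(num_queries=50, d_model=256)
#   config.num_attention_heads      # -> 8, via the property / attribute_map above
#   config.to_dict()["model_type"]  # -> "detr"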
| 319 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for _ in range(n)] for _ in range(n)]
    sol = [[0 for _ in range(n)] for _ in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


# Print order of matrices with Ai as the i-th matrix
def print_optiomal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
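# A quick worked check of the example in main() above: for dimensions
# [30, 35, 15, 5, 10, 20, 25] the minimum number of scalar multiplications is
# 15125, with parenthesization ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ) -- the
# classic CLRS matrix-chain example.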
| 259 | 0 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase__ = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(_UpperCamelCase , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_UpperCamelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_UpperCamelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_UpperCamelCase , strip_accents=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_UpperCamelCase , strip_accents=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_UpperCamelCase , strip_accents=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_UpperCamelCase , strip_accents=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_UpperCamelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCAmelCase__ = {}
for i, token in enumerate(_UpperCamelCase ):
lowerCAmelCase__ = i
lowerCAmelCase__ = RoCBertWordpieceTokenizer(vocab=_UpperCamelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_UpperCamelCase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
lowerCAmelCase__ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_UpperCamelCase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase__ = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCAmelCase__ = tokenizer_r.encode_plus(
_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase , )
lowerCAmelCase__ = tokenizer_r.do_lower_case if hasattr(_UpperCamelCase , 'do_lower_case' ) else False
lowerCAmelCase__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = ['的', '人', '有']
lowerCAmelCase__ = ''.join(_UpperCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase__ = True
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase__ = tokenizer_p.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
lowerCAmelCase__ = tokenizer_r.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
lowerCAmelCase__ = tokenizer_r.convert_ids_to_tokens(_UpperCamelCase )
lowerCAmelCase__ = tokenizer_p.convert_ids_to_tokens(_UpperCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase__ = False
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase__ = tokenizer_r.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
lowerCAmelCase__ = tokenizer_p.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
lowerCAmelCase__ = tokenizer_r.convert_ids_to_tokens(_UpperCamelCase )
lowerCAmelCase__ = tokenizer_p.convert_ids_to_tokens(_UpperCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase__ = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(_UpperCamelCase )
]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase__ = tokenizer.encode('你好' , add_special_tokens=_UpperCamelCase )
lowerCAmelCase__ = tokenizer.encode('你是谁' , add_special_tokens=_UpperCamelCase )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.get_tokenizers(do_lower_case=_UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
lowerCAmelCase__ = '你好,你是谁'
lowerCAmelCase__ = tokenizer.tokenize(_UpperCamelCase )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
lowerCAmelCase__ = tokenizer.convert_tokens_to_shape_ids(_UpperCamelCase )
lowerCAmelCase__ = tokenizer.convert_tokens_to_pronunciation_ids(_UpperCamelCase )
lowerCAmelCase__ = tokenizer.prepare_for_model(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , add_special_tokens=_UpperCamelCase )
lowerCAmelCase__ = tokenizer.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
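# A hedged usage sketch for RoCBertTokenizer outside the test harness; the Hub
# model id is an assumption based on the documented RoCBert checkpoint:
#
#   tokenizer = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
#   tokenizer("你好")  # -> input_ids plus RoCBert's extra shape/pronunciation ids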
| 122 |
def reverse_words(input_str: str) -> str:
    """
    Reverses the order of words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
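# A couple of extra illustrative calls for reverse_words above:
#
#   reverse_words("hello world")       # -> "world hello"
#   reverse_words("  spaced   out  ")  # -> "out spaced" (split() collapses whitespace)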
| 122 | 1 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned"
                " instead. Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
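# A hedged usage sketch for the safety checker above; `clip_input` would come
# from a CLIP image processor and `images` from a DeepFloyd IF pipeline, both
# hypothetical values here:
#
#   checker = IFSafetyChecker.from_pretrained("DeepFloyd/IF-I-XL-v1.0", subfolder="safety_checker")
#   images, nsfw, watermark = checker(clip_input=clip_input, images=images)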
| 317 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def a__ ( snake_case__ , snake_case__ ) -> Tuple:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
lowerCamelCase = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values()
if any(snake_case__ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
lowerCamelCase = jax.tree_util.tree_map(
lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ )
lowerCamelCase = """"""
lowerCamelCase = flatten_dict(snake_case__ , sep=""".""" )
lowerCamelCase = pt_model.state_dict()
# keep track of unexpected & missing keys
lowerCamelCase = []
lowerCamelCase = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCamelCase = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""]
lowerCamelCase = jnp.transpose(snake_case__ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""]
lowerCamelCase = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(snake_case__ ):
lowerCamelCase = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
lowerCamelCase = """.""".join(snake_case__ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
                lowerCamelCase = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                lowerCamelCase = torch.from_numpy(lowerCamelCase )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    lowerCamelCase = list(missing_keys )
    if len(unexpected_keys ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
    if len(missing_keys ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
""" use it for predictions and inference.""" )
return pt_model
| 291 | 0 |
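The converter above renames Flax parameters to their PyTorch equivalents and, for 4-D convolution kernels, transposes from Flax's (height, width, in, out) layout to PyTorch's (out, in, height, width) before calling torch.from_numpy. A minimal sketch of just that transpose step, with arbitrary example shapes:

import numpy as np
import torch

# Flax convolution kernels are stored as (height, width, in_channels, out_channels).
flax_kernel = np.random.rand(3, 3, 16, 32).astype(np.float32)

# PyTorch expects (out_channels, in_channels, height, width), hence axes (3, 2, 0, 1).
pt_kernel = torch.from_numpy(np.transpose(flax_kernel, (3, 2, 0, 1)))
print(pt_kernel.shape)  # torch.Size([32, 16, 3, 3])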
'''simple docstring'''
from random import randint, random
def _snake_case ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : int = 5 , ) -> list:
"""simple docstring"""
lowerCAmelCase = [[-1] * number_of_cells] # Create a highway without any car
lowerCAmelCase = 0
lowerCAmelCase = max(_SCREAMING_SNAKE_CASE , 0 )
while i < number_of_cells:
lowerCAmelCase = (
randint(0 , _SCREAMING_SNAKE_CASE ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def _snake_case ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
lowerCAmelCase = 0
lowerCAmelCase = highway_now[car_index + 1 :]
for cell in range(len(_SCREAMING_SNAKE_CASE ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
    # No car was found before the end of the highway: wrap around and keep counting
return distance + get_distance(_SCREAMING_SNAKE_CASE , -1 )
def _snake_case ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : int ) -> list:
"""simple docstring"""
lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
    # Before the calculations, the highway is empty
lowerCAmelCase = [-1] * number_of_cells
for car_index in range(_SCREAMING_SNAKE_CASE ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
lowerCAmelCase = min(highway_now[car_index] + 1 , _SCREAMING_SNAKE_CASE )
            # Number of empty cells before the next car
lowerCAmelCase = get_distance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) - 1
# We can't have the car causing an accident
lowerCAmelCase = min(next_highway[car_index] , _SCREAMING_SNAKE_CASE )
if random() < probability:
# Randomly, a driver will slow down
lowerCAmelCase = max(next_highway[car_index] - 1 , 0 )
return next_highway
def _snake_case ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : int ) -> list:
"""simple docstring"""
lowerCAmelCase = len(highway[0] )
for i in range(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase = update(highway[i] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase = [-1] * number_of_cells
for car_index in range(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
lowerCAmelCase = (car_index + speed) % number_of_cells
# Commit the change of position
lowerCAmelCase = speed
highway.append(_SCREAMING_SNAKE_CASE )
return highway
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 367 |
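Since the identifiers in the row above are mangled, here is a compact, self-contained restatement of the same Nagel-Schreckenberg update rules (accelerate, brake to the gap ahead, random slowdown, move) on a circular single-lane road:

from random import random

def nasch_step(highway, max_speed=5, p_slow=0.1):
    # Cells hold a car's speed, or -1 when empty; the road wraps around.
    n = len(highway)
    nxt = [-1] * n
    for i, v in enumerate(highway):
        if v == -1:
            continue
        v = min(v + 1, max_speed)  # accelerate
        gap = next((d for d in range(1, n) if highway[(i + d) % n] != -1), n) - 1
        v = min(v, gap)  # brake so we stop before the next car
        if random() < p_slow:
            v = max(v - 1, 0)  # random slowdown
        nxt[(i + v) % n] = v  # move the car forward by its speed
    return nxt

road = [0, -1, -1, 1, -1, -1, -1, 2, -1, -1]
for _ in range(3):
    road = nasch_step(road)
    print(road)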
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class __snake_case( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , A_ , A_ , A_ , A_ , A_=1 , A_=False , **A_ ) -> Optional[int]:
super().__init__(**A_ )
lowerCAmelCase = vocab_size
lowerCAmelCase = d_embed
lowerCAmelCase = d_proj
lowerCAmelCase = cutoffs + [vocab_size]
lowerCAmelCase = [0] + self.cutoffs
lowerCAmelCase = div_val
lowerCAmelCase = self.cutoffs[0]
lowerCAmelCase = len(self.cutoffs ) - 1
lowerCAmelCase = self.shortlist_size + self.n_clusters
lowerCAmelCase = keep_order
lowerCAmelCase = []
lowerCAmelCase = []
def __snake_case ( self , A_ ) -> int:
if self.n_clusters > 0:
lowerCAmelCase = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=A_ , name="""cluster_weight""" )
lowerCAmelCase = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=A_ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
lowerCAmelCase = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=A_ , name=f'out_projs_._{i}' , )
self.out_projs.append(A_ )
else:
self.out_projs.append(A_ )
lowerCAmelCase = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=A_ , name=f'out_layers_._{i}_._weight' , )
lowerCAmelCase = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=A_ , name=f'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase, lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase = self.d_embed // (self.div_val**i)
lowerCAmelCase = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=A_ , name=f'out_projs_._{i}' )
self.out_projs.append(A_ )
lowerCAmelCase = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=A_ , name=f'out_layers_._{i}_._weight' , )
lowerCAmelCase = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=A_ , name=f'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
super().build(A_ )
@staticmethod
def __snake_case ( A_ , A_ , A_ , A_=None ) -> List[Any]:
lowerCAmelCase = x
if proj is not None:
lowerCAmelCase = tf.einsum("""ibd,ed->ibe""" , A_ , A_ )
return tf.einsum("""ibd,nd->ibn""" , A_ , A_ ) + b
@staticmethod
def __snake_case ( A_ , A_ ) -> Dict:
lowerCAmelCase = shape_list(A_ )
lowerCAmelCase = tf.range(lp_size[0] , dtype=target.dtype )
lowerCAmelCase = tf.stack([r, target] , 1 )
return tf.gather_nd(A_ , A_ )
def __snake_case ( self , A_ , A_ , A_=True , A_=False ) -> Tuple:
lowerCAmelCase = 0
if self.n_clusters == 0:
lowerCAmelCase = self._logit(A_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
lowerCAmelCase = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=A_ , logits=A_ )
lowerCAmelCase = tf.nn.log_softmax(A_ , axis=-1 )
else:
lowerCAmelCase = shape_list(A_ )
lowerCAmelCase = []
lowerCAmelCase = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
lowerCAmelCase, lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
lowerCAmelCase = (target >= l_idx) & (target < r_idx)
lowerCAmelCase = tf.where(A_ )
lowerCAmelCase = tf.boolean_mask(A_ , A_ ) - l_idx
if self.div_val == 1:
lowerCAmelCase = self.out_layers[0][0][l_idx:r_idx]
lowerCAmelCase = self.out_layers[0][1][l_idx:r_idx]
else:
lowerCAmelCase = self.out_layers[i][0]
lowerCAmelCase = self.out_layers[i][1]
if i == 0:
lowerCAmelCase = tf.concat([cur_W, self.cluster_weight] , 0 )
lowerCAmelCase = tf.concat([cur_b, self.cluster_bias] , 0 )
lowerCAmelCase = self._logit(A_ , A_ , A_ , self.out_projs[0] )
lowerCAmelCase = tf.nn.log_softmax(A_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
lowerCAmelCase = tf.boolean_mask(A_ , A_ )
lowerCAmelCase = self._gather_logprob(A_ , A_ )
else:
lowerCAmelCase = self._logit(A_ , A_ , A_ , self.out_projs[i] )
lowerCAmelCase = tf.nn.log_softmax(A_ )
lowerCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
lowerCAmelCase = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(A_ )
if target is not None:
lowerCAmelCase = tf.boolean_mask(A_ , A_ )
lowerCAmelCase = tf.boolean_mask(A_ , A_ )
lowerCAmelCase = self._gather_logprob(A_ , A_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(A_ , -cur_logprob , shape_list(A_ ) )
lowerCAmelCase = tf.concat(A_ , axis=-1 )
if target is not None:
if return_mean:
lowerCAmelCase = tf.reduce_mean(A_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(A_ )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(A_ , name=self.name , aggregation="""mean""" if return_mean else """""" )
        return out
| 187 | 0 |
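The adaptive softmax above splits the vocabulary into a frequent-word head plus tail clusters; the log-probability of a tail word factorises as log p(cluster) + log p(word | cluster), which is what the head_logprob[..., cluster_prob_idx, None] + tail_logprob line computes. A toy NumPy check that this composition still yields a valid distribution:

import numpy as np

def log_softmax(x):
    x = x - x.max()
    return x - np.log(np.exp(x).sum())

# 4 head words plus 1 cluster token that routes to 6 tail words.
head_logprob = log_softmax(np.random.randn(5))
tail_logprob = log_softmax(np.random.randn(6))

# Tail words inherit the cluster token's mass: p(cluster) * p(word | cluster).
full_logprob = np.concatenate([head_logprob[:4], head_logprob[4] + tail_logprob])
print(np.exp(full_logprob).sum())  # ~1.0 over the full 10-word vocabulary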
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class A__(unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> List[Any]:
a_ : Union[str, Any] = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
a_ : int = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
model.to(_lowercase )
from datasets import load_dataset
a_ : Tuple = load_dataset("""nielsr/rvlcdip-demo""" )
a_ : Dict = dataset["""train"""][0]["""image"""].convert("""RGB""" )
a_ : int = image_processor(_lowercase , return_tensors="""pt""" ).to(_lowercase )
# forward pass
with torch.no_grad():
a_ : List[Any] = model(**_lowercase )
a_ : Dict = outputs.logits
a_ : Tuple = torch.Size((1, 16) )
self.assertEqual(logits.shape , _lowercase )
a_ : Tuple = torch.tensor(
[-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=_lowercase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _lowercase , atol=1e-4 ) )
| 248 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__snake_case : Tuple = logging.getLogger()
def _UpperCAmelCase ( ):
'''simple docstring'''
a_ : int = argparse.ArgumentParser()
parser.add_argument("""-f""")
a_ : Any = parser.parse_args()
return args.f
class A__(a_ ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> None:
a_ : List[str] = logging.StreamHandler(sys.stdout )
logger.addHandler(_lowercase )
def UpperCamelCase__ ( self , _lowercase ) -> Dict:
a_ : List[str] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(_lowercase , """argv""" , _lowercase ):
a_ : Optional[int] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_lowercase , 0.6_6_6 )
@slow
@require_torch_non_multi_gpu
def UpperCamelCase__ ( self ) -> List[str]:
a_ : Tuple = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(_lowercase )
a_ : Tuple = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_lowercase )
a_ : Optional[Any] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_lowercase )
| 248 | 1 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
lowercase_ = """bert-base-cased"""
lowercase_ = """fp16"""
lowercase_ = """bf16"""
lowercase_ = [FPaa, BFaa]
@require_fsdp
@require_cuda
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def SCREAMING_SNAKE_CASE_ ( self : int )-> str:
"""simple docstring"""
super().setUp()
lowercase__ = dict(
ACCELERATE_USE_FSDP='true' , MASTER_ADDR='localhost' , MASTER_PORT='10999' , RANK='0' , LOCAL_RANK='0' , WORLD_SIZE='1' , )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(a ):
lowercase__ = self.dist_env.copy()
lowercase__ = f"""{i + 1}"""
lowercase__ = strategy
with mockenv_context(**a ):
lowercase__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Any:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(a ):
lowercase__ = self.dist_env.copy()
lowercase__ = prefetch_policy
with mockenv_context(**a ):
lowercase__ = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Dict:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(a ):
lowercase__ = self.dist_env.copy()
lowercase__ = state_dict_type
with mockenv_context(**a ):
lowercase__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> str:
"""simple docstring"""
lowercase__ = AutoModel.from_pretrained(a )
for policy in FSDP_AUTO_WRAP_POLICY:
lowercase__ = self.dist_env.copy()
lowercase__ = policy
if policy == "TRANSFORMER_BASED_WRAP":
lowercase__ = 'BertLayer'
elif policy == "SIZE_BASED_WRAP":
lowercase__ = '2000'
with mockenv_context(**a ):
lowercase__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(a )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
lowercase__ = self.dist_env.copy()
lowercase__ = 'TRANSFORMER_BASED_WRAP'
lowercase__ = 'T5Layer'
with mockenv_context(**a ):
lowercase__ = FullyShardedDataParallelPlugin()
with self.assertRaises(a ) as cm:
fsdp_plugin.set_auto_wrap_policy(a )
self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception ) )
lowercase__ = self.dist_env.copy()
lowercase__ = 'SIZE_BASED_WRAP'
lowercase__ = '0'
with mockenv_context(**a ):
lowercase__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(a )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
lowercase__ = self.dist_env.copy()
lowercase__ = mp_dtype
with mockenv_context(**a ):
lowercase__ = Accelerator()
if mp_dtype == "fp16":
lowercase__ = torch.floataa
elif mp_dtype == "bf16":
lowercase__ = torch.bfloataa
lowercase__ = MixedPrecision(param_dtype=a , reduce_dtype=a , buffer_dtype=a )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , a )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , a ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> str:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
lowercase__ = self.dist_env.copy()
lowercase__ = str(a ).lower()
with mockenv_context(**a ):
lowercase__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=a ) )
@require_fsdp
@require_multi_gpu
@slow
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
super().setUp()
lowercase__ = 0.82
lowercase__ = [
'fsdp_shard_grad_op_transformer_based_wrap',
'fsdp_full_shard_transformer_based_wrap',
]
lowercase__ = {
'multi_gpu_fp16': 3_200,
'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2_000,
'fsdp_full_shard_transformer_based_wrap_fp16': 1_900,
            # Disabling the test below as it overwhelms the RAM usage
            # on the CI self-hosted runner, leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
lowercase__ = 160
lowercase__ = 160
lowercase__ = inspect.getfile(accelerate.test_utils )
lowercase__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> str:
"""simple docstring"""
lowercase__ = os.path.join(self.test_scripts_folder , 'test_performance.py' )
lowercase__ = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
for config in self.performance_configs:
lowercase__ = cmd.copy()
for i, strategy in enumerate(a ):
if strategy.lower() in config:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append('--mixed_precision=no' )
else:
cmd_config.append('--mixed_precision=fp16' )
if "cpu_offload" in config:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(a , env=os.environ.copy() )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Dict:
"""simple docstring"""
lowercase__ = os.path.join(self.test_scripts_folder , 'test_checkpointing.py' )
lowercase__ = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
'--use_fsdp',
'--mixed_precision=fp16',
'--fsdp_transformer_layer_cls_to_wrap=BertLayer',
]
for i, strategy in enumerate(a ):
lowercase__ = cmd.copy()
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
lowercase__ = len(a )
for state_dict_type in FSDP_STATE_DICT_TYPE:
lowercase__ = cmd_config[:state_dict_config_index]
cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
'--partial_train_epoch=1',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(a , env=os.environ.copy() )
lowercase__ = cmd_config[:-1]
lowercase__ = os.path.join(self.tmpdir , 'epoch_0' )
cmd_config.extend(
[
f"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(a , env=os.environ.copy() )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Dict:
"""simple docstring"""
lowercase__ = os.path.join(self.test_scripts_folder , 'test_peak_memory_usage.py' )
lowercase__ = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
lowercase__ = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['--mixed_precision=fp16'] )
else:
cmd_config.extend(['--mixed_precision=no'] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['--use_fsdp'] )
for i, strategy in enumerate(a ):
if strategy.lower() in spec:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
f"""--n_train={self.n_train}""",
f"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(a , env=os.environ.copy() )
| 269 |
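The tests above configure FSDP purely through environment variables, using mockenv_context to set and restore them around each assertion. A generic version of that set-and-restore pattern (not the actual transformers implementation, just the idea):

import os
from contextlib import contextmanager

@contextmanager
def mock_env(**overrides):
    # Temporarily set environment variables, restoring the previous state on exit.
    saved = {key: os.environ.get(key) for key in overrides}
    os.environ.update({key: str(value) for key, value in overrides.items()})
    try:
        yield
    finally:
        for key, old in saved.items():
            if old is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = old

with mock_env(ACCELERATE_USE_FSDP="true", FSDP_SHARDING_STRATEGY="1"):
    print(os.environ["ACCELERATE_USE_FSDP"])  # true
print(os.environ.get("ACCELERATE_USE_FSDP"))  # None again (unless it was set before)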
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
if is_torch_version('<' , '2.0.0' ) or not hasattr(_SCREAMING_SNAKE_CASE , '_dynamo' ):
return False
return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True ) -> Dict:
lowercase__ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
lowercase__ = is_compiled_module(_SCREAMING_SNAKE_CASE )
if is_compiled:
lowercase__ = model
lowercase__ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase__ = model.module
if not keep_fpaa_wrapper:
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'forward' )
lowercase__ = model.__dict__.pop('_original_forward' , _SCREAMING_SNAKE_CASE )
if original_forward is not None:
while hasattr(_SCREAMING_SNAKE_CASE , '__wrapped__' ):
lowercase__ = forward.__wrapped__
if forward == original_forward:
break
lowercase__ = forward
if getattr(_SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , _SCREAMING_SNAKE_CASE ):
convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE )
if is_compiled:
lowercase__ = model
lowercase__ = compiled_model
return model
def __UpperCamelCase () -> Tuple:
PartialState().wait_for_everyone()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif PartialState().local_process_index == 0:
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@contextmanager
def __UpperCamelCase (**_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
for key, value in kwargs.items():
lowercase__ = str(_SCREAMING_SNAKE_CASE )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
if not hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , '__class__' , _SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ):
return obj.__qualname__
if hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
return obj.__name__
return str(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
for key, value in source.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase__ = destination.setdefault(_SCREAMING_SNAKE_CASE , {} )
merge_dicts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
lowercase__ = value
return destination
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = None ) -> bool:
if port is None:
lowercase__ = 29500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 269 | 1 |
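Several helpers in the row above are short enough to restate with readable names. merge_dicts, for instance, is a plain recursive dictionary merge; the version below is the same logic deobfuscated, with a small usage example:

def merge_dicts(source, destination):
    # Fold `source` into `destination`, recursing into nested dictionaries.
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination

base = {"train": {"lr": 1e-3, "epochs": 3}, "seed": 42}
override = {"train": {"lr": 5e-4}}
print(merge_dicts(override, base))
# {'train': {'lr': 0.0005, 'epochs': 3}, 'seed': 42}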
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
lowerCAmelCase__ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _A ( A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
for attribute in key.split('''.''' ):
__lowercase = getattr(A__ , A__ )
if weight_type is not None:
__lowercase = getattr(A__ , A__ ).shape
else:
__lowercase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
__lowercase = value
elif weight_type == "weight_g":
__lowercase = value
elif weight_type == "weight_v":
__lowercase = value
elif weight_type == "bias":
__lowercase = value
elif weight_type == "running_mean":
__lowercase = value
elif weight_type == "running_var":
__lowercase = value
elif weight_type == "num_batches_tracked":
__lowercase = value
elif weight_type == "inv_freq":
__lowercase = value
else:
__lowercase = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = []
__lowercase = fairseq_model.state_dict()
__lowercase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__lowercase = False
if "conv_layers" in name:
load_conv_layer(
A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == '''group''' , )
__lowercase = True
else:
for key, mapped_key in MAPPING.items():
__lowercase = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__lowercase = True
if "*" in mapped_key:
__lowercase = name.split(A__ )[0].split('''.''' )[-2]
__lowercase = mapped_key.replace('''*''' , A__ )
if "pos_bias_u" in name:
__lowercase = None
elif "pos_bias_v" in name:
__lowercase = None
elif "weight_g" in name:
__lowercase = '''weight_g'''
elif "weight_v" in name:
__lowercase = '''weight_v'''
elif "bias" in name:
__lowercase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowercase = '''weight'''
elif "running_mean" in name:
__lowercase = '''running_mean'''
elif "inv_freq" in name:
__lowercase = '''inv_freq'''
elif "running_var" in name:
__lowercase = '''running_var'''
elif "num_batches_tracked" in name:
__lowercase = '''num_batches_tracked'''
else:
__lowercase = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(F"Unused weights: {unused_weights}" )
def _A ( A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = full_name.split('''conv_layers.''' )[-1]
__lowercase = name.split('''.''' )
__lowercase = int(items[0] )
__lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__lowercase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__lowercase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
__lowercase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
__lowercase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(A__ )
@torch.no_grad()
def _A ( A__ , A__ , A__=None , A__=None , A__=True ):
"""simple docstring"""
if config_path is not None:
__lowercase = WavaVecaConformerConfig.from_pretrained(A__ , hidden_act='''swish''' )
else:
__lowercase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__lowercase = '''rotary'''
if is_finetuned:
if dict_path:
__lowercase = Dictionary.load(A__ )
            # Important: change the bos & pad token ids, since the CTC blank symbol is <pad>
            # and not <s> as in fairseq.
__lowercase = target_dict.pad_index
__lowercase = target_dict.bos_index
__lowercase = target_dict.eos_index
__lowercase = len(target_dict.symbols )
__lowercase = os.path.join(A__ , '''vocab.json''' )
if not os.path.isdir(A__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(A__ ) )
return
os.makedirs(A__ , exist_ok=A__ )
__lowercase = target_dict.indices
# fairseq has the <pad> and <s> switched
__lowercase = 0
__lowercase = 1
with open(A__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(A__ , A__ )
__lowercase = WavaVecaCTCTokenizer(
A__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=A__ , )
__lowercase = True if config.feat_extract_norm == '''layer''' else False
__lowercase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=A__ , return_attention_mask=A__ , )
__lowercase = WavaVecaProcessor(feature_extractor=A__ , tokenizer=A__ )
processor.save_pretrained(A__ )
__lowercase = WavaVecaConformerForCTC(A__ )
else:
__lowercase = WavaVecaConformerForPreTraining(A__ )
if is_finetuned:
__lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__lowercase = argparse.Namespace(task='''audio_pretraining''' )
__lowercase = fairseq.tasks.setup_task(A__ )
__lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A__ )
__lowercase = model[0].eval()
recursively_load_weights(A__ , A__ , not is_finetuned )
hf_wavavec.save_pretrained(A__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCAmelCase__ = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 104 |
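One detail in the conversion above is easy to miss: fairseq dictionaries place <s> at index 0 and <pad> at index 1, while CTC fine-tuned models in transformers use <pad> (the blank token) at index 0, so the two indices are swapped before writing vocab.json. A small sketch of that step (the token inventory shown is the fairseq default plus two letters):

import json

vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "a": 4, "b": 5}

# Swap the two special tokens so <pad> becomes index 0 (the CTC blank).
vocab["<pad>"], vocab["<s>"] = 0, 1

with open("vocab.json", "w", encoding="utf-8") as vocab_handle:
    json.dump(vocab, vocab_handle)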
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCAmelCase__ = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def _A ( A__=None ):
"""simple docstring"""
if subparsers is not None:
__lowercase = subparsers.add_parser('''tpu-config''' , description=_description )
else:
__lowercase = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
__lowercase = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=A__ , default=A__ , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=A__ , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=A__ , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
__lowercase = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=A__ , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=A__ )
return parser
def _A ( A__ ):
"""simple docstring"""
__lowercase = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(A__ ):
__lowercase = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
__lowercase = defaults.command_file
if not args.command and defaults.commands is not None:
__lowercase = defaults.commands
if not args.tpu_name:
__lowercase = defaults.tpu_name
if not args.tpu_zone:
__lowercase = defaults.tpu_zone
if args.accelerate_version == "dev":
__lowercase = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
__lowercase = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
__lowercase = F"accelerate=={args.accelerate_version}"
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
__lowercase = [f.read().splitlines()]
# To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
__lowercase = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
__lowercase = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [F"pip install {args.accelerate_version}"]
new_cmd += args.command
__lowercase = '''; '''.join(A__ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
__lowercase = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"Running {' '.join(A__ )}" )
return
subprocess.run(A__ )
print('''Successfully setup pod.''' )
def _A ( ):
"""simple docstring"""
__lowercase = tpu_command_parser()
__lowercase = parser.parse_args()
tpu_command_launcher(A__ )
| 104 | 1 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=5_1_2,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
def _lowerCAmelCase ( lowercase ) -> Dict:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f'could not parse string as bool {string}' )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
_a : Union[str, Any] = parser.parse_args()
_a : Optional[Any] = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 359 |
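For reference, an invocation of the conversion script above might look like the following. The script filename and all paths are placeholders; only the flags come from the parser defined in the row:

python convert_controlnet_checkpoint.py \
    --checkpoint_path control_sd15_canny.pth \
    --original_config_file cldm_v15.yaml \
    --image_size 512 \
    --to_safetensors \
    --dump_path ./controlnet-canny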
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def _lowerCAmelCase ( lowercase ) -> List[Any]:
__lowerCAmelCase = [False] * len(lowercase )
__lowerCAmelCase = [-1] * len(lowercase )
def dfs(lowercase , lowercase ):
__lowerCAmelCase = True
__lowerCAmelCase = c
for u in graph[v]:
if not visited[u]:
dfs(lowercase , 1 - c )
for i in range(len(lowercase ) ):
if not visited[i]:
dfs(lowercase , 0 )
for i in range(len(lowercase ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_a : str = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 46 | 0 |
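For contrast with the bipartite adjacency list above, a graph containing an odd cycle fails the two-coloring check. This reuses the call name from the snippet's own driver line, since the identifier on the definition itself was mangled:

# A triangle is an odd cycle, so it cannot be two-colored:
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False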
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> Any:
'''simple docstring'''
if num <= 0:
raise ValueError("Input must be a positive integer" )
A__ = [True] * (num + 1)
A__ = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , lowercase__ ):
A__ = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
| 68 |
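A quick sanity check for the sieve above (again using the name from the snippet's own driver line rather than the mangled definition):

print(prime_sieve_eratosthenes(30))
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]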
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _lowercase :
'''simple docstring'''
@staticmethod
def __magic_name__( *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :str ) -> Union[str, Any]:
pass
def _UpperCamelCase ( lowercase__ ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__lowerCAmelCase : str =(
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __magic_name__( self :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = pipeline(
'''document-question-answering''' , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : Optional[Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
__SCREAMING_SNAKE_CASE : str = '''What is the placebo?'''
__SCREAMING_SNAKE_CASE : str = [
{
'''image''': load_image(lowerCAmelCase__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : Any = dqa_pipeline(lowerCAmelCase__ , top_k=2 )
self.assertEqual(
lowerCAmelCase__ , [
[
{'''score''': ANY(lowerCAmelCase__ ), '''answer''': ANY(lowerCAmelCase__ ), '''start''': ANY(lowerCAmelCase__ ), '''end''': ANY(lowerCAmelCase__ )},
{'''score''': ANY(lowerCAmelCase__ ), '''answer''': ANY(lowerCAmelCase__ ), '''start''': ANY(lowerCAmelCase__ ), '''end''': ANY(lowerCAmelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :Dict ) -> List[str]:
__SCREAMING_SNAKE_CASE : Tuple = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : int = '''How many cats are there?'''
__SCREAMING_SNAKE_CASE : Optional[int] = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
        # No text can be detected in this image, so layoutlmv2 should fail
        # and return an empty answer.
__SCREAMING_SNAKE_CASE : Any = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
        # We can optionally pass the words and bounding boxes directly
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : Any = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Any = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : int = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Tuple = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : int = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : str = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __magic_name__( self :int ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=lowerCAmelCase__ , revision='''3dc6de3''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : str = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Dict = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__SCREAMING_SNAKE_CASE : Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
__SCREAMING_SNAKE_CASE : str = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __magic_name__( self :str ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=lowerCAmelCase__ , revision='''3dc6de3''' , max_seq_len=50 , )
__SCREAMING_SNAKE_CASE : List[str] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Dict = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : Optional[int] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__SCREAMING_SNAKE_CASE : List[str] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : str = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Optional[int] = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
pass
| 9 | 0 |
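Editor's note: the tests above pin the impira/layoutlm-document-qa checkpoint (revision 3dc6de3) and query a hosted invoice fixture (INVOICE_URL, not reproduced here). A minimal standalone sketch of the same pipeline API follows; the image path is a placeholder, and pytesseract plus Pillow are assumed to be installed for the OCR step.

from transformers import pipeline

# Same checkpoint/revision the tests use, so scores should be comparable.
dqa_pipeline = pipeline(
    "document-question-answering",
    model="impira/layoutlm-document-qa",
    revision="3dc6de3",
)
image = "path/or/url/to/invoice.png"  # placeholder for the INVOICE_URL fixture
outputs = dqa_pipeline(image=image, question="What is the invoice number?", top_k=2)
for answer in outputs:
    print(answer["score"], answer["answer"], answer["start"], answer["end"])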
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
# Fake function we will use as a tool inside the interpreter tests.
def add_two(x):
    """simple docstring"""
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        '''simple docstring'''
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3})
        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 5, 'y': 5})
    def test_evaluate_call(self):
        '''simple docstring'''
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'y': 5})
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant(self):
        '''simple docstring'''
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3})
    def test_evaluate_dict(self):
        '''simple docstring'''
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        self.assertDictEqual(result, {'x': 3, 'y': 5})
        self.assertDictEqual(state, {'x': 3, 'test_dict': {'x': 3, 'y': 5}})
    def test_evaluate_expression(self):
        '''simple docstring'''
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'y': 5})
    def test_evaluate_f_string(self):
        '''simple docstring'''
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {'x': 3, 'text': 'This is x: 3.'})
    def test_evaluate_if(self):
        '''simple docstring'''
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {'x': 3, 'y': 2})
        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 8, 'y': 5})
    def test_evaluate_list(self):
        '''simple docstring'''
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {'x': 3, 'test_list': [3, 5]})
    def test_evaluate_name(self):
        '''simple docstring'''
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3, 'y': 3})
    def test_evaluate_subscript(self):
        '''simple docstring'''
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'test_list': [3, 5]})
        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'test_dict': {'x': 3, 'y': 5}})
    def test_evaluate_for(self):
        '''simple docstring'''
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {'range': range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {'x': 2, 'i': 2})
| 371 |
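The tests above document the contract of evaluate: it executes a restricted Python snippet with only the whitelisted tools in scope, mutates the state dict in place, and returns the value of the last assignment. A minimal usage sketch under those same assumptions:

from transformers.tools.python_interpreter import evaluate

def add_two(x):
    return x + 2

state = {"x": 3}
# Only tools passed in the mapping are callable from inside the snippet.
result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
print(result)  # 5
print(state)   # {'x': 3, 'y': 5}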
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester ( unittest.TestCase ):
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest ( FlaxModelTesterMixin, unittest.TestCase ):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = FlaxAlbertModelTester(self )
    @slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''albert-base-v2''' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxAlbertModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding(self ):
        '''simple docstring'''
        model = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 279 | 0 |
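A standalone sketch of the inference path these tests cover; albert-base-v2 is the same public checkpoint, and flax/jax are assumed to be installed.

from transformers import AutoTokenizer, FlaxAlbertModel

tokenizer = AutoTokenizer.from_pretrained("albert-base-v2")
model = FlaxAlbertModel.from_pretrained("albert-base-v2")
inputs = tokenizer("Hello, ALBERT!", return_tensors="np")
outputs = model(**inputs)
# (batch_size, sequence_length, hidden_size); hidden_size is 768 for albert-base-v2,
# which is what the (1, 11, 768) shape assertion in the slow test above checks.
print(outputs.last_hidden_state.shape)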
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester :
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int = 13 , lowerCAmelCase_ : int = 64 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : int = 128 , lowerCAmelCase_ : str=[16, 32, 64, 128] , lowerCAmelCase_ : int = 7 , lowerCAmelCase_ : int = 4 , lowerCAmelCase_ : int = 37 , lowerCAmelCase_ : str = "gelu" , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : int = 10 , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = 128 , lowerCAmelCase_ : List[int] = [2, 2, 2, 2] , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , ) -> Tuple:
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Dict = image_size
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Optional[int] = use_labels
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Optional[int] = num_attention_heads
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : List[Any] = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : int = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : Tuple = encoder_stride
UpperCAmelCase_ : List[Any] = num_attention_outputs
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Dict = embed_dim + 1
UpperCAmelCase_ : List[Any] = resolution
UpperCAmelCase_ : List[str] = depths
UpperCAmelCase_ : Optional[Any] = hidden_sizes
UpperCAmelCase_ : Optional[Any] = dim
UpperCAmelCase_ : Any = mlp_expansion_ratio
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = TFEfficientFormerModel(config=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any ) -> Any:
UpperCAmelCase_ : int = self.type_sequence_label_size
UpperCAmelCase_ : Optional[int] = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
UpperCAmelCase_ : Any = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : List[str] = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs
UpperCAmelCase_ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
    def setUp( self ):
        self.model_tester = TFEfficientFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[int] = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str ):
UpperCAmelCase_ : List[Any] = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ : List[str] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
if hasattr(self.model_tester , "encoder_seq_length" ):
UpperCAmelCase_ : Tuple = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
UpperCAmelCase_ : int = seq_length * self.model_tester.chunk_length
else:
UpperCAmelCase_ : List[str] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
UpperCAmelCase_ : List[str] = outputs.decoder_hidden_states
                self.assertIsInstance(lowerCAmelCase_ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
UpperCAmelCase_ : Dict = getattr(self.model_tester , "seq_length" , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : str = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any]=False ) -> Any:
UpperCAmelCase_ : Any = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Optional[Any] = getattr(self.model_tester , "seq_length" , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = getattr(self.model_tester , "key_length" , lowerCAmelCase_ )
UpperCAmelCase_ : Dict = getattr(self.model_tester , "chunk_length" , lowerCAmelCase_ )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
UpperCAmelCase_ : Tuple = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Optional[int] = False
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Any = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
UpperCAmelCase_ : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Tuple = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
UpperCAmelCase_ : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
UpperCAmelCase_ : List[Any] = model_class(lowerCAmelCase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
UpperCAmelCase_ : str = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ )
self.assertTrue(outputs_dict is not None )
def prepare_img( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[int] = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
UpperCAmelCase_ : Optional[int] = self.default_image_processor
UpperCAmelCase_ : Optional[int] = prepare_img()
UpperCAmelCase_ : Optional[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" )
# forward pass
UpperCAmelCase_ : Optional[Any] = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
UpperCAmelCase_ : Any = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
UpperCAmelCase_ : Optional[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
UpperCAmelCase_ : int = self.default_image_processor
UpperCAmelCase_ : List[str] = prepare_img()
UpperCAmelCase_ : List[str] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" )
# forward pass
UpperCAmelCase_ : Union[str, Any] = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
UpperCAmelCase_ : List[Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 268 |
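For reference, the snap-research/efficientformer-l1-300 checkpoint used in the integration tests above can be driven directly; a short sketch (TensorFlow and Pillow assumed installed, image path is a placeholder):

import tensorflow as tf
from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
image = Image.open("path/to/image.png").convert("RGB")  # placeholder path
inputs = processor(images=image, return_tensors="tf")
outputs = model(**inputs, training=False)
predicted_class = int(tf.argmax(outputs.logits, axis=-1)[0])
print(model.config.id2label[predicted_class])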
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ (__A ):
def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=768 ) -> List[Any]:
super().__init__(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = proj_size
UpperCAmelCase_ : Optional[Any] = CLIPVisionModel(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = PaintByExampleMapper(lowerCAmelCase_ )
UpperCAmelCase_ : str = nn.LayerNorm(config.hidden_size )
UpperCAmelCase_ : List[Any] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=False ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = self.model(pixel_values=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = clip_output.pooler_output
UpperCAmelCase_ : List[Any] = self.mapper(latent_states[:, None] )
UpperCAmelCase_ : List[str] = self.final_layer_norm(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.proj_out(lowerCAmelCase_ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class UpperCamelCase_ (nn.Module ):
def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Tuple:
super().__init__()
UpperCAmelCase_ : List[Any] = (config.num_hidden_layers + 1) // 5
UpperCAmelCase_ : Optional[Any] = config.hidden_size
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Union[str, Any] = nn.ModuleList(
[
BasicTransformerBlock(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , activation_fn="gelu" , attention_bias=lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ )
] )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[str] ) -> str:
for block in self.blocks:
UpperCAmelCase_ : int = block(lowerCAmelCase_ )
return hidden_states
| 268 | 1 |
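The mapper above runs a small stack of transformer blocks over the pooled CLIP embedding before the final projection. The sketch below reproduces that shape flow with plain torch.nn layers instead of diffusers' BasicTransformerBlock; SimpleMapper and all hyperparameters here are illustrative assumptions, not the library's implementation.

import torch
from torch import nn

class SimpleMapper(nn.Module):
    # Hypothetical stand-in: self-attention blocks applied to the pooled CLIP
    # embedding, treated as a length-1 sequence, mirroring the mapper above.
    def __init__(self, hidden_size: int = 1024, num_blocks: int = 5):
        super().__init__()
        self.blocks = nn.ModuleList(
            nn.TransformerEncoderLayer(d_model=hidden_size, nhead=8, batch_first=True)
            for _ in range(num_blocks)
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states

pooled = torch.randn(2, 1, 1024)  # e.g. a CLIP pooler output with a sequence axis added
print(SimpleMapper()(pooled).shape)  # torch.Size([2, 1, 1024])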
def sum_of_digits(n: int) -> int:
    """Iteratively sum the decimal digits of n."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the decimal digits of n."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    """Sum the decimal digits of n via string conversion."""
    return sum(int(c) for c in str(abs(n)))
def benchmark() -> None:
    """Time the three implementations on increasingly large inputs."""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = F'{func.__name__}({value})'
        timing = timeit(F'__main__.{call}', setup="import __main__")
        print(F'{call:56} = {func(value)} -- {timing:.4f} seconds')
    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 137 |
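A quick sanity check (not part of the original module) that the three digit-sum helpers above agree:

for n in (0, 7, -42, 26_2144, 9_876_543_210):
    assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)
print(sum_of_digits(26_2144))  # 19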
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__(self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images(self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )
        return images_with_overflow
    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names(self ):
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class(self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor(self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 137 | 1 |
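A usage sketch for the processor defined above, using the public microsoft/layoutxlm-base checkpoint; the document image path is a placeholder, and pytesseract must be installed because apply_ocr defaults to True.

from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("path/to/document.png").convert("RGB")  # placeholder path
encoding = processor(image, return_tensors="pt")
print(list(encoding.keys()))  # input_ids, attention_mask, bbox, image (order may vary)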
'''simple docstring'''
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
        with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
            for token in vocab_tokens:
                fp.write(F'{token} {vocab_tokens[token]}\n' )
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 85 |
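The fixture vocabulary above is synthetic; against a real checkpoint the tokenizer is used like any other (sentencepiece must be installed):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
print(tokenizer.decode(ids))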
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory( _ ):
    '''Factory hooked up to the `env` subcommand.'''
    return EnvironmentCommand()
class EnvironmentCommand ( BaseDiffusersCLICommand ):
    @staticmethod
    def register_subcommand( parser ):
        '''Register the `env` subcommand on the CLI parser.'''
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=info_command_factory )
    def run( self ):
        '''Collect and print environment info for bug reports.'''
        hub_version = huggingface_hub.__version__
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = "not installed"
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = "not installed"
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})',
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict( d ):
        '''Render the info dict as a markdown-ish bullet list.'''
        return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 85 | 1 |
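The command above is what backs `diffusers-cli env` on the command line. It can also be invoked programmatically; the module path below is assumed from the file's location in the repo (diffusers/commands/env.py):

from diffusers.commands.env import EnvironmentCommand

info = EnvironmentCommand().run()  # prints the report and returns the dict
print(info["Platform"])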
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__(self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images(self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
        return images_with_overflow
    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names(self ):
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class(self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor(self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 64 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 64 | 1 |
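The _LazyModule indirection above defers importing the heavy torch/TF submodules until an attribute is first touched. A generic sketch of the same idea using PEP 562 module-level __getattr__ (a stand-in, not the transformers helper itself):

# package/__init__.py -- generic lazy-import pattern
import importlib

_import_structure = {"XLMTokenizer": "tokenization_xlm"}

def __getattr__(name):  # PEP 562: called only when `name` isn't found normally
    if name in _import_structure:
        module = importlib.import_module(f".{_import_structure[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

def __dir__():
    return sorted(_import_structure)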
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode :
    '''Binary tree node holding a number of coins.'''
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')
def distribute_coins(root: TreeNode | None ) -> int:
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The nodes number should be same as the number of coins" )
    # Main calculation
    def get_distrib(node: TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 269 |
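A small worked example of the distribution algorithm above:

# Three nodes, three coins, all piled at the root:
#       3
#      / \
#     0   0
# One coin must move to each child, so two moves in total.
root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(root) == 2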
'''simple docstring'''
from math import factorial
def combinations( n: int, k: int ) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"If a class of 40 students must be arranged into groups of",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f'''are {combinations(10, 3)} ways that first, second and''',
"third place can be awarded.",
)
| 125 | 0 |
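The result can be cross-checked against the standard library (math.comb, Python 3.8+):

import math

assert combinations(52, 5) == math.comb(52, 5) == 2_598_960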
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        print(F'Pixel and labels shape: {pixel_values.shape}, {labels.shape}')
        print(F'Labels: {labels}')
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
{'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ViTMSNConfig , has_text_modality=False , hidden_size=37)
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViTMSN does not use inputs_embeds''')
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
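
# --- Hedged usage sketch (editor addition, not part of the test file above) ---
# Running the same checkpoint outside unittest. Note that facebook/vit-msn-small
# is a self-supervised pre-training checkpoint, so the classification head loaded
# below is freshly initialized and the predicted label is illustrative only.
def _vit_msn_demo():
    from PIL import Image
    import torch
    from transformers import ViTImageProcessor, ViTMSNForImageClassification

    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
    model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[int(logits.argmax(-1))])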
| 358 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    '''simple docstring'''
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
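
# --- Hedged round-trip sketch (editor addition) ---
# The core contract the tests above exercise: any image processor saved with
# save_pretrained() can be re-discovered by AutoImageProcessor from the folder's
# preprocessor_config.json. No network access is needed for this sketch.
def _auto_image_processor_roundtrip():
    with tempfile.TemporaryDirectory() as tmp_dir:
        CLIPImageProcessor().save_pretrained(tmp_dir)  # writes preprocessor_config.json
        reloaded = AutoImageProcessor.from_pretrained(tmp_dir)
        assert isinstance(reloaded, CLIPImageProcessor)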
| 105 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    '''simple docstring'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 210 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 210 | 1 |
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None
def logger():
    '''simple docstring'''
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """simple docstring"""
    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    """simple docstring"""
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """simple docstring"""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """simple docstring"""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """simple docstring"""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """simple docstring"""
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
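
# --- Hedged usage sketch (editor addition) ---
# FileLock above resolves to the best backend available at import time; the
# context-manager form pairs acquire() with release() even when the body raises.
def _filelock_demo(lock_path="demo.lock"):
    lock = FileLock(lock_path, timeout=1)
    try:
        with lock:
            pass  # work on the protected resource here
    except Timeout:
        print(f"Another process holds {lock_path}")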
| 355 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    '''simple docstring'''
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
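
# Hedged invocation sketch (editor addition): when installed as part of the
# `diffusers` package, the entry point above is typically exposed as a console
# script, e.g. `diffusers-cli env`, which dispatches to EnvironmentCommand.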
| 218 | 0 |
def euclidean_distance_sqr(point1, point2):
    '''simple docstring'''
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    '''simple docstring'''
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    '''simple docstring'''
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    '''simple docstring'''
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    '''simple docstring'''
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    '''simple docstring'''
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
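
    # Hedged cross-check (editor addition): an O(n^2) brute force over all
    # pairs should agree with the divide-and-conquer routine above.
    def brute_force_closest_pair(pts):
        best = float("inf")
        for i in range(len(pts) - 1):
            for j in range(i + 1, len(pts)):
                best = min(best, euclidean_distance_sqr(pts[i], pts[j]))
        return best**0.5

    assert abs(brute_force_closest_pair(points) - closest_pair_of_points(points, len(points))) < 1e-9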
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 | 1 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    '''simple docstring'''
    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs):
        super().__init__(split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
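
# --- Hedged usage sketch (editor addition) ---
# Reading a Spark DataFrame into a `datasets` Dataset with the reader above.
# Assumes a local SparkSession and pyspark installed; paths are placeholders.
def _spark_reader_demo():
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[2]").getOrCreate()
    df = spark.createDataFrame([("hello",), ("world",)], schema=["text"])
    dataset = SparkDatasetReader(df, cache_dir="./spark_cache").read()
    print(dataset)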
| 358 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    '''simple docstring'''
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
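
# --- Hedged smoke test (editor addition) ---
# Shapes follow the default CLIPConfig (224x224 vision input); weights are
# randomly initialized here, so the detections are meaningless but exercise the
# forward pass end to end.
def _safety_checker_demo():
    config = CLIPConfig()
    checker = IFSafetyChecker(config)
    clip_input = torch.randn(2, 3, 224, 224)
    images = np.random.rand(2, 64, 64, 3)
    images, nsfw_detected, watermark_detected = checker(clip_input, images)
    print(nsfw_detected, watermark_detected)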
| 272 | 0 |
from __future__ import annotations
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> set[str]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Dict =set(__lowerCAmelCase ), [start]
while stack:
UpperCAmelCase : int =stack.pop()
explored.add(__lowerCAmelCase )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(__lowerCAmelCase )
return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
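
    # Hedged companion sketch (editor addition): a recursive formulation that
    # visits the same vertex set; handy for cross-checking the iterative version.
    def depth_first_search_recursive(graph: dict, vertex: str, visited: set | None = None) -> set[str]:
        visited = set() if visited is None else visited
        visited.add(vertex)
        for adj in graph[vertex]:
            if adj not in visited:
                depth_first_search_recursive(graph, adj, visited)
        return visited

    assert depth_first_search_recursive(G, "A") == depth_first_search(G, "A")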
| 348 |
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
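
    # Hedged usage sketch (editor addition): dropping the scheduler under test
    # into a diffusion pipeline. The model id is illustrative and the weights
    # must be downloaded for this to actually run.
    def _kdpm2_pipeline_demo(self):
        from diffusers import DiffusionPipeline

        pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        pipe.scheduler = KDPM2DiscreteScheduler.from_config(pipe.scheduler.config)
        image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]
        image.save("kdpm2_sample.png")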
| 348 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
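
# --- Hedged round-trip sketch (editor addition) ---
# With real vocab.json / merges.txt files (paths below are placeholders), the
# byte-level BPE above encodes any text reversibly.
def _bpe_roundtrip_demo(vocab_file="vocab.json", merges_file="merges.txt"):
    tokenizer = BlenderbotTokenizer(vocab_file, merges_file)
    ids = tokenizer.encode("Hello world!")
    print(ids, "->", tokenizer.decode(ids))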
| 371 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[]
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
'''this model name. Check the model page at '''
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
f" directory containing a file named {weights_name} or"
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a file named {weights_name}" )
| 155 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester( unittest.TestCase ):
'''simple docstring'''
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_cli.py'])
    base_cmd = ['accelerate', 'launch']
    config_folder = Path.home() / '.cache/huggingface/accelerate'
    config_file = 'default_config.yaml'
    config_path = config_folder / config_file
    changed_path = config_folder / '_default_config.yaml'
    test_config_path = Path('tests/test_configs')
    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        """simple docstring"""
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        """simple docstring"""
        for config in sorted(self.test_config_path.glob('**/*.yaml')):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(config), self.test_file_path], env=os.environ.copy())

    def test_accelerate_test(self):
        """simple docstring"""
        execute_subprocess_async(['accelerate', 'test'], env=os.environ.copy())
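# Hypothetical invocation exercised by the tests above (paths are
# illustrative): `accelerate launch --config_file tests/test_configs/latest.yaml
# <script.py>`; test_no_config additionally appends --multi_gpu when more than
# one CUDA device is visible.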
class TpuConfigTester( unittest.TestCase ):
'''simple docstring'''
    tpu_name = 'test-tpu'
    tpu_zone = 'us-central1-a'
    command = 'ls'
    cmd = ['accelerate', 'tpu-config']
    base_output = 'cd /usr/share'
    command_file = 'tests/test_samples/test_command_file.sh'
    gcloud = 'Running gcloud compute tpus tpu-vm ssh'
    def test_base(self):
        """simple docstring"""
        _a = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'], return_stdout=True, )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''', _a, )
    def test_base_backward_compatibility(self):
        """simple docstring"""
        _a = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ], return_stdout=True, )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''', _a, )
    def test_with_config_file(self):
        """simple docstring"""
        _a = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'], return_stdout=True)
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''', _a, )
    def test_with_config_file_and_command(self):
        """simple docstring"""
        _a = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'], return_stdout=True, )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''', _a, )
    def test_with_config_file_and_multiple_command(self):
        """simple docstring"""
        _a = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ], return_stdout=True, )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''', _a, )
    def test_with_config_file_and_command_file(self):
        """simple docstring"""
        _a = run_command(
            self.cmd
            + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'], return_stdout=True, )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''', _a, )
    def test_with_config_file_and_command_file_backward_compatibility(self):
        """simple docstring"""
        _a = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ], return_stdout=True, )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''', _a, )
    def test_accelerate_install(self):
        """simple docstring"""
        _a = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'], return_stdout=True, )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''', _a, )
    def test_accelerate_install_version(self):
        """simple docstring"""
        _a = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ], return_stdout=True, )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''', _a, )
| 211 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
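# How MAPPING is applied below (the key names here are illustrative): a
# fairseq weight such as "encoder.layers.3.self_attn.k_proj.weight" matches
# the "self_attn.k_proj" entry; since the mapped key is not in TOP_LEVEL_KEYS
# it is prefixed with "unispeech.", and the "*" is replaced with the layer
# index parsed from the name, yielding
# "unispeech.encoder.layers.3.attention.k_proj" with weight_type "weight".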
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """simple docstring"""
    for attribute in key.split('.'):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
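# Naming convention handled above (example names are illustrative): a fairseq
# key like "conv_layers.0.0.weight" splits into layer_id=0 and type_id=0,
# i.e. the convolution weight of the first feature-extractor block, while
# "conv_layers.0.2.weight" (type_id=2) addresses that block's layer norm;
# anything else is recorded as unused.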
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """simple docstring"""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 42
            vocab_dict['<s>'] = 43
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 211 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    """simple docstring"""
    new_name = old_name
    if "patch_embed" in old_name:
        _, layer, param = old_name.split('.')
        if layer == "0":
            new_name = old_name.replace('0', 'convolution1')
        elif layer == "1":
            new_name = old_name.replace('1', 'batchnorm_before')
        elif layer == "3":
            new_name = old_name.replace('3', 'convolution2')
        else:
            new_name = old_name.replace('4', 'batchnorm_after')
    if "network" in old_name and re.search(r'\d\.\d', old_name):
        two_digit_num = r'\b\d{2}\b'
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r'\d\.\d\d.', old_name).group()
        else:
            match = re.search(r'\d\.\d.', old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, '')
            trimmed_name = trimmed_name.replace('network', match[0] + '.meta4D_layers.blocks.' + match[2:-1])
            new_name = 'intermediate_stages.' + trimmed_name
        else:
            trimmed_name = old_name.replace(match, '')
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('network', 'meta4D_layers.blocks.' + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace('network', 'meta3D_layers.blocks.' + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace('norm1', 'layernorm1')
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace('norm2', 'layernorm2')
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace('fc1', 'linear_in')
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace('fc2', 'linear_out')
            new_name = 'last_stage.' + trimmed_name
    elif "network" in old_name and re.search(r'.\d.', old_name):
        new_name = old_name.replace('network', 'intermediate_stages')
    if "fc" in new_name:
        new_name = new_name.replace('fc', 'convolution')
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('norm1', 'batchnorm_before')
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('norm2', 'batchnorm_after')
    if "proj" in new_name:
        new_name = new_name.replace('proj', 'projection')
    if "dist_head" in new_name:
        new_name = new_name.replace('dist_head', 'distillation_classifier')
    elif "head" in new_name:
        new_name = new_name.replace('head', 'classifier')
    elif "patch_embed" in new_name:
        new_name = 'efficientformer.' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('norm', 'layernorm')
        new_name = 'efficientformer.' + new_name
    else:
        new_name = 'efficientformer.encoder.' + new_name
    return new_name
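# Worked example of the renaming above (input name is illustrative):
# "patch_embed.0.weight" -> "patch_embed.convolution1.weight" via the first
# branch, which the final patch_embed check then prefixes to
# "efficientformer.patch_embed.convolution1.weight".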
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """simple docstring"""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool):
    """simple docstring"""
    orig_state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = '_'.join(checkpoint_path.split('/')[-1].split('.')[0].split('_')[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={'shortest_edge': image_size}, crop_size={'height': crop_size, 'width': crop_size}, resample=pillow_resamplings['bicubic'], )
    pixel_values = processor(images=image, return_tensors='pt').pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings['bicubic']),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878])
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f'Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7')
    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f'Checkpoint successfully converted. Model saved at {pytorch_dump_path}')
    processor.save_pretrained(pytorch_dump_path)
    print(f'Processor successfully saved at {pytorch_dump_path}')
    if push_to_hub:
        print('Pushing model to the hub...')
        model.push_to_hub(
            repo_id=f'Bearnardd/{pytorch_dump_path}', commit_message='Add model', use_temp_dir=True, )
        processor.push_to_hub(
            repo_id=f'Bearnardd/{pytorch_dump_path}', commit_message='Add image processor', use_temp_dir=True, )
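# Example command line (paths and the script name are illustrative, flags
# match the parser below):
# python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#     --pytorch_model_path ./efficientformer_l1.pth \
#     --config_file ./efficientformer_l1_config.json \
#     --pytorch_dump_path ./efficientformer-l1-hf --no-push_to_hub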
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 356 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(
        default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, )
    max_seq_length: int = field(
        default=1_024, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
    pad_to_max_length: bool = field(
        default=False, metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        }, )
    train_file: Optional[str] = field(
        default=None, metadata={'help': 'A csv or a json file containing the training data.'})
    validation_file: Optional[str] = field(
        default=None, metadata={'help': 'A csv or a json file containing the validation data.'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'A csv or a json file containing the test data.'})

    def __post_init__(self):
        '''simple docstring'''
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
        else:
            train_extension = self.train_file.split('.')[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('.')[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)], )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}')
    logger.info(f'Training/evaluation parameters {training_args}')
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split('.')[-1]
                test_extension = data_args.test_file.split('.')[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['test'] = data_args.test_file
            else:
                raise ValueError('Need either a GLUE task or a test file for `do_predict`.')
        for key in data_files.keys():
            logger.info(f'load a local file for {key}: {data_files[key]}')
        if data_args.train_file.endswith('.csv'):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset('csv', data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset('json', data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets['train'].features['label'].names
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {'Refused': 0, 'Entailed': 1}
    model.config.id2label = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split('#') for _table_row in _table_text.strip('\n').split('\n')]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples['statement']
        tables = list(map(_convert_table_text_to_pandas, examples['table_text']))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result['label'] = examples['label']
        return result

    with training_args.main_process_first(desc='dataset map pre-processing'):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on dataset', )
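    # Shape of the flattened TabFact tables handled above (example value is
    # illustrative): a `table_text` string such as "col1#col2\nfoo#bar" is
    # split on newlines into rows and on '#' into cells, so the first row
    # becomes the DataFrame header and "foo"/"bar" the single data row that
    # TapexTokenizer then linearizes together with the statement.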
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('--do_predict requires a test dataset')
        predict_dataset = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('label')
        predictions = trainer.predict(predict_dataset, metric_key_prefix='predict').predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, 'predict_results_tabfact.txt')
        if trainer.is_world_process_zero():
            with open(output_predict_file, 'w') as writer:
                logger.info('***** Predict Results *****')
                writer.write('index\tprediction\n')
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f'{index}\t{item}\n')
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    """simple docstring"""
    main()
if __name__ == "__main__":
main()
| 308 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    '''simple docstring'''
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'torchsde'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'torchsde'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'torchsde'])
| 96 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 281 | 0 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    '''simple docstring'''
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 371 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig(PretrainedConfig):
    model_type = 'camembert'
    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
            ] )
| 340 | 0 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ['the', 'be', 'to', 'of', 'and', 'in', 'that', 'have']
def try_key(key, ciphertext):
    """simple docstring"""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext):
    """simple docstring"""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles, common_word):
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename="p059_cipher.txt"):
    """simple docstring"""
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8')
    ciphertext = [int(number) for number in data.strip().split(',')]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
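# Worked example of the XOR step in try_key (byte values are illustrative):
# ciphertext byte 2 combined with key letter 'g' (ord 103) gives
# 2 ^ 103 == 101 == ord('e'), and XOR-ing 101 with 103 recovers 2 — the same
# routine therefore both encrypts and decrypts.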
if __name__ == "__main__":
print(f'{solution() = }')
| 104 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="bert", choices=["bert"])
parser.add_argument("--model_name", default="bert-base-uncased", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()
    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError("args.model_type should be \"bert\".")
    state_dict = model.state_dict()
    compressed_sd = {}
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1
    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
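# Layer-selection note (indices follow the loop above): because std_idx only
# advances for teacher layers [0, 2, 4, 7, 9, 11], teacher layer 7, for
# example, lands at student index 3, giving a 6-layer student initialized
# from a 12-layer BERT teacher.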
| 251 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1_024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
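    # Example of the activation parsing above: feed_forward_proj="gated-gelu"
    # yields is_gated_act=True and dense_act_fn="gelu_new" (via the special
    # case), while "relu" yields is_gated_act=False and dense_act_fn="relu".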
    @property
    def hidden_size(self):
        '''simple docstring'''
        return self.d_model

    @property
    def num_attention_heads(self):
        '''simple docstring'''
        return self.num_heads

    @property
    def num_hidden_layers(self):
        '''simple docstring'''
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 13

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 5e-4
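    # Illustrative result of the `inputs` property with use_past=True: the
    # encoder attention_mask's sequence axis becomes
    # "past_encoder_sequence + sequence", decoder_input_ids keeps only the
    # batch axis, and fill_with_past_key_values_ adds one past key/value
    # entry per decoder layer to the mapping.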
| 206 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowercase ( unittest.TestCase ):
    def setUp(self):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            '的',
            '价',
            '格',
            '是',
            '15',
            '便',
            'alex',
            '##andra',
            ',',
            '。',
            '-',
            't',
            'shirt',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        image_processor_map = {
            'do_resize': True,
            'size': {'height': 224, 'width': 224},
            'do_center_crop': True,
            'crop_size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
            'do_convert_rgb': True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        '''simple docstring'''
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        '''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
def test_save_load_pretrained_additional_features( self : str ) -> Tuple:
'''simple docstring'''
processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
processor = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=False )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , BertTokenizerFast )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , ChineseCLIPImageProcessor )
def test_image_processor( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
image_input = self.prepare_image_inputs()
input_feat_extract = image_processor(image_input , return_tensors="np" )
input_processor = processor(images=image_input , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def test_tokenizer( self : List[Any] ) -> str:
'''simple docstring'''
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
input_str = "Alexandra,T-shirt的价格是15便士。"
encoded_processor = processor(text=input_str )
encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def test_processor( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
input_str = "Alexandra,T-shirt的价格是15便士。"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(ValueError ):
processor()
def test_tokenizer_decode( self : Optional[Any] ) -> Dict:
'''simple docstring'''
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids )
decoded_tok = tokenizer.batch_decode(predicted_ids )
self.assertListEqual(decoded_tok , decoded_processor )
def __snake_case( self : int ) -> int:
'''simple docstring'''
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
input_str = "Alexandra,T-shirt的价格是15便士。"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 206 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_wav2vec2'] = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_wav2vec2'] = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_wav2vec2'] = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .processing_wav2vec2 import Wav2Vec2Processor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wav2vec2 import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
Wav2Vec2ForAudioFrameClassification,
Wav2Vec2ForCTC,
Wav2Vec2ForMaskedLM,
Wav2Vec2ForPreTraining,
Wav2Vec2ForSequenceClassification,
Wav2Vec2ForXVector,
Wav2Vec2Model,
Wav2Vec2PreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wav2vec2 import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWav2Vec2ForCTC,
TFWav2Vec2ForSequenceClassification,
TFWav2Vec2Model,
TFWav2Vec2PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wav2vec2 import (
FlaxWav2Vec2ForCTC,
FlaxWav2Vec2ForPreTraining,
FlaxWav2Vec2Model,
FlaxWav2Vec2PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
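# Usage sketch (hedged): with the lazy structure above, importing a light
# symbol does not pull in torch/tf/flax; the heavy submodule is only loaded on
# first attribute access, e.g.
#
#   from transformers import Wav2Vec2Config   # cheap, no torch import yet
#   from transformers import Wav2Vec2Model    # triggers the torch submodule import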
| 16 |
"""simple docstring"""
import os
def solution() -> int:
with open(os.path.dirname(__file__) + '''/p022_names.txt''' ) as file:
names = str(file.readlines()[0] )
names = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
name_score = 0
total_score = 0
for i, name in enumerate(names ):
for letter in name:
name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
name_score = 0
return total_score
if __name__ == "__main__":
print(solution())
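# Worked example from the Problem 22 statement: COLIN scores
# 3 + 15 + 12 + 9 + 14 = 53 and, at position 938 in the sorted list,
# contributes 938 * 53 = 49714 to the total.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49714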
| 16 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
mod_file = inspect.getfile(accelerate.test_utils)
test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["""scripts""", """test_cli.py"""])
base_cmd = ["""accelerate""", """launch"""]
config_folder = Path.home() / """.cache/huggingface/accelerate"""
config_file = """default_config.yaml"""
config_path = config_folder / config_file
changed_path = config_folder / """_default_config.yaml"""
test_config_path = Path("""tests/test_configs""")
@classmethod
def setUpClass( cls )-> Dict:
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def tearDownClass( cls )-> Any:
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def test_no_config( self )-> int:
'''simple docstring'''
cmd = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def test_config_compatibility( self )-> Union[str, Any]:
'''simple docstring'''
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=config ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(config ), self.test_file_path] , env=os.environ.copy() )
def test_accelerate_test( self )-> Dict:
'''simple docstring'''
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class TpuConfigTester(unittest.TestCase):
tpu_name = """test-tpu"""
tpu_zone = """us-central1-a"""
command = """ls"""
cmd = ["""accelerate""", """tpu-config"""]
base_output = """cd /usr/share"""
command_file = """tests/test_samples/test_command_file.sh"""
gcloud = """Running gcloud compute tpus tpu-vm ssh"""
def test_base( self )-> Optional[int]:
'''simple docstring'''
output = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=True , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , output , )
def test_base_backward_compatibility( self )-> Dict:
'''simple docstring'''
output = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=True , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , output , )
def test_with_config_file( self )-> Dict:
'''simple docstring'''
output = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=True )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , output , )
def test_with_config_file_and_command( self )-> Optional[Any]:
'''simple docstring'''
output = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=True , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , output , )
def test_with_config_file_and_multiple_command( self )-> Union[str, Any]:
'''simple docstring'''
output = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=True , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , output , )
def test_with_config_file_and_command_file( self )-> int:
'''simple docstring'''
output = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=True , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , output , )
def test_with_config_file_and_command_file_backward_compatibility( self )-> Any:
'''simple docstring'''
output = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=True , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , output , )
def test_accelerate_install( self )-> Union[str, Any]:
'''simple docstring'''
output = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=True , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , output , )
def test_accelerate_install_version( self )-> Any:
'''simple docstring'''
output = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=True , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , output , )
| 251 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class NezhaConfig(PretrainedConfig):
pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = """nezha"""
def __init__( self , vocab_size=21128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , )-> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.max_relative_position = max_relative_position
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
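# Minimal usage sketch: instantiating the config yields the nezha-cn-base
# defaults, and any field can be overridden by keyword, e.g.
#
#   config = NezhaConfig(num_hidden_layers=6)
#   assert config.hidden_size == 768 and config.num_hidden_layers == 6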
| 251 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
def test_xlm_roberta_base( self :Optional[int] ) -> Union[str, Any]:
model = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
expected_output_shape = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
expected_output_values_last_dim = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
output = model(input_ids )['last_hidden_state'].detach()
self.assertEqual(output.shape , expected_output_shape )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
@slow
def test_xlm_roberta_large( self :List[Any] ) -> Optional[Any]:
model = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
expected_output_shape = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
expected_output_values_last_dim = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
output = model(input_ids )['last_hidden_state'].detach()
self.assertEqual(output.shape , expected_output_shape )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
| 78 |
"""simple docstring"""
class MaxFenwickTree :
"""simple docstring"""
def __init__( self :List[Any] , size :int ) -> None:
self.size = size
self.arr = [0] * size
self.tree = [0] * size
@staticmethod
def get_next( index :int ) -> int:
return index | (index + 1)
@staticmethod
def get_prev( index :int ) -> int:
return (index & (index + 1)) - 1
def update( self :Any , index :int , value :int ) -> None:
self.arr[index] = value
while index < self.size:
current_left_border = self.get_prev(index ) + 1
if current_left_border == index:
self.tree[index] = value
else:
# recompute the max over [current_left_border, index] from the
# already-consistent lower nodes
self.tree[index] = max(value , self.query(current_left_border , index ) )
index = self.get_next(index )
def query( self :List[str] , left :int , right :int ) -> int:
right -= 1 # Because of right is exclusive
result = 0
while left <= right:
current_left = self.get_prev(right )
if left <= current_left:
result = max(result , self.tree[right] )
right = current_left
else:
result = max(result , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
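# Usage sketch for the structure above: point updates with max queries over the
# half-open range [left, right).
#
#   ft = MaxFenwickTree(5)
#   ft.update(0, 10)
#   ft.update(3, 7)
#   assert ft.query(0, 5) == 10
#   assert ft.query(1, 4) == 7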
| 78 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest( TokenizerTesterMixin ,unittest.TestCase ):
'''simple docstring'''
tokenizer_class = CTRLTokenizer
test_rust_tokenizer = False
test_seq2seq = False
def setUp( self : List[str] ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(vocab_tokens ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(merges ) )
def get_tokenizer( self : Union[str, Any] , **kwargs : Union[str, Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts( self : List[Any] , tokenizer : Union[str, Any] ) -> Optional[int]:
input_text = "adapt react readapt apt"
output_text = "adapt react readapt apt"
return input_text, output_text
def test_full_tokenizer( self : Optional[Any] ) -> Optional[Any]:
tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
text = "adapt react readapt apt"
bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
tokens = tokenizer.tokenize(text )
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path , targets ):
"""Extract warnings from a downloaded artifact (in .zip format)."""
selected_warnings = set()
buffer = []
def parse_line(fp ):
for line in fp:
if isinstance(line , bytes ):
line = line.decode("UTF-8" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(" " ):
# process a single warning and move it to `selected_warnings`.
if len(buffer ) > 0:
warning = "\n".join(buffer )
# Only keep the warnings specified in `targets`
if any(f""": {x}: """ in warning for x in targets ):
selected_warnings.add(warning )
buffer.clear()
continue
else:
line = line.strip()
buffer.append(line )
if from_gh:
for filename in os.listdir(artifact_path ):
file_path = os.path.join(artifact_path , filename )
if not os.path.isdir(file_path ):
# read the file
if filename != "warnings.txt":
continue
with open(file_path ) as fp:
parse_line(fp )
else:
try:
with zipfile.ZipFile(artifact_path ) as z:
for filename in z.namelist():
if not os.path.isdir(filename ):
# read the file
if filename != "warnings.txt":
continue
with z.open(filename ) as fp:
parse_line(fp )
except Exception:
logger.warning(
f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def extract_warnings(artifact_dir , targets ):
"""Extract warnings from all artifact files."""
selected_warnings = set()
paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith(".zip" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
return selected_warnings
if __name__ == "__main__":
def list_str(values ):
return values.split("," )
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
args = parser.parse_args()
from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
selected_warnings = extract_warnings(args.output_dir, args.targets)
selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4) | 256 | 0 |
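# Example invocation (run id and paths are hypothetical, shown only for
# illustration):
#
#   python extract_warnings.py \
#       --workflow_run_id 1234567890 \
#       --output_dir /tmp/warnings \
#       --token <a GitHub token with actions:read permission>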
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter):
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 230 |
from torch import nn
class ClassificationHead( nn.Module ):
"""Classification head for transformer encoders"""
def __init__( self , class_size , embed_size ):
super().__init__()
self.class_size = class_size
self.embed_size = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
self.mlp = nn.Linear(embed_size , class_size )
def forward( self , hidden_state ):
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
logits = self.mlp(hidden_state )
return logits
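# Usage sketch (hypothetical sizes): the head maps a hidden state of dimension
# embed_size to class_size logits with a single linear layer.
#
#   import torch
#   head = ClassificationHead(class_size=5, embed_size=768)
#   logits = head(torch.randn(2, 768))   # shape (2, 5)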
| 230 | 1 |
"""simple docstring"""
def solution(limit = 1000000 ):
limit = limit + 1
frequency = [0] * limit
for first_term in range(1 , limit ):
for n in range(first_term , limit , first_term ):
common_difference = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
count = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f'''{solution() = }''')
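# Worked example from the Problem 135 statement: with x > y > z in arithmetic
# progression, 34**2 - 27**2 - 20**2 = 1156 - 729 - 400 = 27, so n = 27 admits
# the solution (x, y, z) = (34, 27, 20).
assert 34**2 - 27**2 - 20**2 == 27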
| 364 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting( DiffusionPipeline ):
"""simple docstring"""
def __init__( self :Optional[Any] , segmentation_model :CLIPSegForImageSegmentation , segmentation_processor :CLIPSegProcessor , vae :AutoencoderKL , text_encoder :CLIPTextModel , tokenizer :CLIPTokenizer , unet :UNetaDConditionModel , scheduler :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker :StableDiffusionSafetyChecker , feature_extractor :CLIPImageProcessor , ) -> List[str]:
super().__init__()
if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1:
deprecation_message = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
' file'
)
deprecate('steps_offset!=1' , '1.0.0' , deprecation_message , standard_warn=False )
new_config = dict(scheduler.config )
new_config['steps_offset'] = 1
scheduler._internal_dict = FrozenDict(new_config )
if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
deprecation_message = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
' Hub, it would be very nice if you could open a Pull request for the'
' `scheduler/scheduler_config.json` file'
)
deprecate('skip_prk_steps not set' , '1.0.0' , deprecation_message , standard_warn=False )
new_config = dict(scheduler.config )
new_config['skip_prk_steps'] = True
scheduler._internal_dict = FrozenDict(new_config )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def enable_attention_slicing( self :Optional[int] , slice_size :Optional[Union[str, int]] = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(slice_size )
def disable_attention_slicing( self :Optional[int] ) -> List[str]:
self.enable_attention_slicing(None )
def enable_sequential_cpu_offload( self :int ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
device = torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _execution_device( self :Optional[Any] ) -> List[str]:
if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(module , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self :Optional[Any] , prompt :Union[str, List[str]] , image :Union[torch.FloatTensor, PIL.Image.Image] , text :str , height :int = 5_12 , width :int = 5_12 , num_inference_steps :int = 50 , guidance_scale :float = 7.5 , negative_prompt :Optional[Union[str, List[str]]] = None , num_images_per_prompt :Optional[int] = 1 , eta :float = 0.0 , generator :Optional[torch.Generator] = None , latents :Optional[torch.FloatTensor] = None , output_type :Optional[str] = "pil" , return_dict :bool = True , callback :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps :int = 1 , **kwargs , ) -> int:
inputs = self.segmentation_processor(
text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device )
outputs = self.segmentation_model(**inputs )
mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
inpainting_pipeline = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
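# Usage sketch (hedged: the checkpoints below are the ones commonly paired with
# this community pipeline, not pinned by the code above):
#
#   from diffusers import DiffusionPipeline
#   from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined"),
#       segmentation_processor=CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined"),
#   )
#   out = pipe(prompt="a cat", image=init_image, text="the dog")  # paints a cat over the dog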
| 181 | 0 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape(input_array : np.ndarray) -> np.ndarray:
return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features : np.ndarray , labels : np.ndarray , classes : int) -> np.ndarray:
covariance_sum = np.nan
for i in range(classes):
data = features[:, labels == i]
data_mean = data.mean(1)
# Centralize the data of class i
centered_data = data - column_reshape(data_mean)
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(centered_data , centered_data.T)
else:
# If covariance_sum is np.nan (i.e. first loop)
covariance_sum = np.dot(centered_data , centered_data.T)
return covariance_sum / features.shape[1]
def covariance_between_classes(features : np.ndarray , labels : np.ndarray , classes : int) -> np.ndarray:
general_data_mean = features.mean(1)
covariance_sum = np.nan
for i in range(classes):
data = features[:, labels == i]
device_data = data.shape[1]
data_mean = data.mean(1)
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(data_mean) - column_reshape(general_data_mean) , (column_reshape(data_mean) - column_reshape(general_data_mean)).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
covariance_sum = device_data * np.dot(
column_reshape(data_mean) - column_reshape(general_data_mean) , (column_reshape(data_mean) - column_reshape(general_data_mean)).T , )
return covariance_sum / features.shape[1]
def principal_component_analysis(features : np.ndarray , dimensions : int) -> np.ndarray:
# Check if the features have been loaded
if features.any():
data_mean = features.mean(1)
# Center the dataset
centered_data = features - np.reshape(data_mean , (data_mean.size, 1))
covariance_matrix = np.dot(centered_data , centered_data.T) / features.shape[1]
eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
# Take all the columns in the reverse order (-1), and then takes only the first
filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
projected_data = np.dot(filtered_eigenvectors.T , centered_data)
logging.info("Principal Component Analysis computed")
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True)
logging.error("Dataset empty")
raise AssertionError
def linear_discriminant_analysis(features : np.ndarray , labels : np.ndarray , classes : int , dimensions : int) -> np.ndarray:
assert classes > dimensions
# Check if features have been already loaded
if features.any():
eigenvalues, eigenvectors = eigh(
covariance_between_classes(features , labels , classes) , covariance_within_classes(features , labels , classes) , )
filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
filtered_svd_matrix = svd_matrix[:, 0:dimensions]
projected_data = np.dot(filtered_svd_matrix.T , features)
logging.info("Linear Discriminant Analysis computed")
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True)
logging.error("Dataset empty")
raise AssertionError
def test_linear_discriminant_analysis() -> None:
# Create dummy dataset with 2 classes and 3 features
features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
labels = np.array([0, 0, 0, 1, 1])
classes = 2
dimensions = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(AssertionError) as error_info:
projected_data = linear_discriminant_analysis(
features , labels , classes , dimensions)
if isinstance(projected_data , np.ndarray):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes")
assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
dimensions = 2
expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
with pytest.raises(AssertionError) as error_info:
output = principal_component_analysis(features , dimensions)
if not np.allclose(expected_output , output):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
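# Quick demo of the PCA helper above (values are illustrative): projecting a
# 3-feature dataset onto 2 principal components yields a (2, n_samples) array.
#
#   demo = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [1.0, 1.0, 2.0, 2.0]])
#   print(principal_component_analysis(demo, 2).shape)   # (2, 4)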
| 87 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[int] =logging.get_logger(__name__)
_lowercase : Tuple ={
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig ):
"""simple docstring"""
model_type = "swinv2"
attribute_map = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , image_size=2_2_4 , patch_size=4 , num_channels=3 , embed_dim=9_6 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=3_2 , **kwargs , ) -> Any:
"""simple docstring"""
super().__init__(**kwargs )
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths )
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.encoder_stride = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
self.pretrained_window_sizes = (0, 0, 0, 0)
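# Sketch of the derived attribute using the defaults above: with embed_dim=96
# and four stages, hidden_size = 96 * 2 ** 3 = 768.
#
#   config = Swinv2Config()
#   assert config.hidden_size == 768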
| 170 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 199 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
result = 0
while number:
number &= number - 1
result += 1
return result
def get_set_bits_count_using_modulo_operator(number: int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
result = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
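# Worked example: 25 is 0b11001, so both counters return 3. Kernighan's trick
# clears the lowest set bit each iteration: 25 (11001) -> 24 (11000) ->
# 16 (10000) -> 0, i.e. three iterations.
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3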
def benchmark() -> None:
def do_benchmark(number: int ) -> None:
setup = "import __main__ as z"
print(F'''Benchmark when {number = }:''' )
print(F'''{get_set_bits_count_using_modulo_operator(number ) = }''' )
timing = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=setup )
print(F'''timeit() runs in {timing} seconds''' )
print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }''' )
timing = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=setup , )
print(F'''timeit() runs in {timing} seconds''' )
for number in (25, 37, 58, 0):
do_benchmark(number )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 199 | 1 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT :
def __init__( self , poly_a=None , poly_b=None ):
# Input as list
self.polyA = list(poly_a or [0] )[:]
self.polyB = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
self.len_A = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
self.len_B = len(self.polyB )
# Add 0 to make lengths equal a power of 2
self.c_max_length = int(
2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
self.root = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
self.product = self.__multiply()
def __dft( self , which ):
dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
# Corner case
if len(dft ) <= 1:
return dft[0]
#
next_ncol = self.c_max_length // 2
while next_ncol > 0:
new_dft = [[] for i in range(next_ncol )]
root = self.root**next_ncol
# First half of next step
current_root = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(next_ncol ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
current_root = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(next_ncol ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
dft = new_dft
next_ncol = next_ncol // 2
return dft[0]
def __multiply( self ):
dft_a = self.__dft("A" )
dft_b = self.__dft("B" )
inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
next_ncol = 2
while next_ncol <= self.c_max_length:
new_inverse_c = [[] for i in range(next_ncol )]
root = self.root ** (next_ncol // 2)
current_root = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
inverce_c = new_inverse_c
next_ncol *= 2
# Unpack
inverce_c = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self ):
a = "A = " + " + ".join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
b = "B = " + " + ".join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
c = "A*B = " + " + ".join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
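# Usage sketch: multiplying (1 + 2x + 3x^2) by (4 + 5x) with the class above.
# The expected coefficients are 4 + 13x + 22x^2 + 15x^3.
#
#   fft = FFT(poly_a=[1, 2, 3], poly_b=[4, 5])
#   print(fft.product)   # [(4+0j), (13+0j), (22+0j), (15+0j)]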
| 80 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowercase : List[str] = logging.get_logger(__name__)
def shape_list(tensor :Union[tf.Tensor, np.ndarray] ) -> List[int]:
if isinstance(tensor , np.ndarray ):
return list(tensor.shape )
dynamic = tf.shape(tensor )
if tensor.shape == tf.TensorShape(None ):
return dynamic
static = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def stable_softmax(logits :tf.Tensor , axis :Optional[int] = None , name :Optional[str] = None ) -> tf.Tensor:
return tf.nn.softmax(logits=logits + 1E-9 , axis=axis , name=name )
def functional_layernorm(inputs , weight , bias , epsilon=1E-5 , axis=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
mean , variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
shape = [1] * inputs.shape.rank
shape[axis] = shape_list(inputs )[axis]
weight = tf.reshape(weight , shape )
bias = tf.reshape(bias , shape )
# Compute layer normalization using the batch_normalization
# function.
outputs = tf.nn.batch_normalization(
inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
return outputs
def flatten(input , start_dim=0 , end_dim=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
in_shape = tf.shape(input )
flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(input , out_shape )
def invert_attention_mask(encoder_attention_mask :tf.Tensor ) -> tf.Tensor:
if not isinstance(encoder_attention_mask , tf.Tensor ):
encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor :tf.Tensor , embed_dim :int , tensor_name :str = "input_ids" ) -> None:
tf.debugging.assert_less(
tensor , tf.cast(embed_dim , dtype=tensor.dtype ) , message=(
F'''The maximum value of {tensor_name} ({tf.math.reduce_max(tensor )}) must be smaller than the embedding '''
F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def save_attributes_to_hdf5_group(group , name , data ):
HDF5_OBJECT_HEADER_LIMIT = 64_512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
F'''bytes: {bad_attributes}''' )
data_npy = np.asarray(data )
num_chunks = 1
chunked_data = np.array_split(data_npy , num_chunks )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
chunked_data = np.array_split(data_npy , num_chunks )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(chunked_data ):
group.attrs['''%s%d''' % (name, chunk_id)] = chunk_data
else:
group.attrs[name] = data
def load_attributes_from_hdf5_group(group , name ):
if name in group.attrs:
data = [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs[name]]
else:
data = []
chunk_id = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def expand_1d(data ):
def _expand_single_1d_tensor(t ):
if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(t , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_1d_tensor , data )
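# Quick sketch of two helpers above (assumes eager TF): shape_list mixes static
# and dynamic dimensions, and flatten mirrors torch.flatten.
#
#   x = tf.zeros((2, 3, 4))
#   print(shape_list(x))            # [2, 3, 4]
#   print(flatten(x, 1, 2).shape)   # (2, 12)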
| 332 | 0 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester :
def __init__( self, parent, ):
'''simple docstring'''
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 2
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs( self):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = EsmConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def prepare_config_and_inputs_for_decoder( self):
'''simple docstring'''
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
model = TFEsmModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask}
result = model(inputs)
inputs = [input_ids, input_mask]
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
'''simple docstring'''
config.add_cross_attention = True
model = TFEsmModel(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
result = model(inputs)
inputs = [input_ids, input_mask]
result = model(inputs, encoder_hidden_states=encoder_hidden_states)
# Also check the case where encoder outputs are not passed
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
model = TFEsmForMaskedLM(config=config)
result = model([input_ids, input_mask])
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
'''simple docstring'''
config.num_labels = self.num_labels
model = TFEsmForTokenClassification(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def prepare_config_and_inputs_for_common( self):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
test_head_masking = False
test_onnx = False
def setUp( self):
'''simple docstring'''
self.model_tester = TFEsmModelTester(self)
self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def test_config( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def test_model( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_as_decoder( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_for_masked_lm( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_token_classification( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFEsmModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@unittest.skip("Protein models do not support embedding resizing.")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip("Protein models do not support embedding resizing.")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(__a)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_lowerCAmelCase : Union[str, Any] = model.get_bias()
assert isinstance(__a, __a)
for k, v in name.items():
assert isinstance(__a, tf.Variable)
else:
_lowerCAmelCase : str = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Tuple = model.get_bias()
assert name is None
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
_lowerCAmelCase : Dict = tf.constant([[0, 1, 2, 3, 4, 5]])
_lowerCAmelCase : Optional[Any] = model(__a)[0]
_lowerCAmelCase : int = [1, 6, 33]
self.assertEqual(list(output.numpy().shape), __a)
# compare the actual values for a slice.
_lowerCAmelCase : Optional[Any] = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-2))
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
_lowerCAmelCase : int = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
_lowerCAmelCase : Tuple = model(__a)[0]
# compare the actual values for a slice.
_lowerCAmelCase : Optional[Any] = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4))
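# Editor's hedged sketch, not part of the test file above: the integration
# tests pin down a 3x3 corner of the logits with numpy.allclose. The same
# slice-comparison pattern, shown standalone on made-up arrays.
import numpy as np

def _check_slice(output: np.ndarray, expected: np.ndarray, atol: float = 1e-4) -> bool:
    # mirrors `output[:, :3, :3]` in the tests above
    return np.allclose(output[:, :3, :3], expected, atol=atol)

assert _check_slice(np.zeros((1, 6, 33)), np.zeros((1, 3, 3)))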
| 365 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_snake_case = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BeitFeatureExtractor"]
_snake_case = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
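# Editor's hedged sketch, not part of the module above: the try/except blocks
# gate imports on optional backends. The same guard in plain Python, probing
# availability with importlib instead of the transformers helpers.
import importlib.util

def _backend_available(name: str) -> bool:
    # True when the package can be imported, without importing it eagerly
    return importlib.util.find_spec(name) is not None

if _backend_available("torch"):
    pass  # torch-dependent symbols would be imported here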
| 300 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = logging.get_logger(__name__)
__A = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCAmelCase (PretrainedConfig ):
"""simple docstring"""
_UpperCAmelCase :int = "deformable_detr"
_UpperCAmelCase :Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , max_position_embeddings=1024 , encoder_layers=6 , encoder_ffn_dim=1024 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=300 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
lowercase__: int = use_timm_backbone
lowercase__: List[Any] = backbone_config
lowercase__: Union[str, Any] = num_channels
lowercase__: Any = num_queries
lowercase__: Union[str, Any] = max_position_embeddings
lowercase__: Dict = d_model
lowercase__: Any = encoder_ffn_dim
lowercase__: str = encoder_layers
lowercase__: Optional[Any] = encoder_attention_heads
lowercase__: str = decoder_ffn_dim
lowercase__: Union[str, Any] = decoder_layers
lowercase__: Any = decoder_attention_heads
lowercase__: str = dropout
lowercase__: Optional[int] = attention_dropout
lowercase__: Any = activation_dropout
lowercase__: Optional[int] = activation_function
lowercase__: int = init_std
lowercase__: Tuple = init_xavier_std
lowercase__: Any = encoder_layerdrop
lowercase__: int = auxiliary_loss
lowercase__: Any = position_embedding_type
lowercase__: List[str] = backbone
lowercase__: List[str] = use_pretrained_backbone
lowercase__: List[str] = dilation
# deformable attributes
lowercase__: List[str] = num_feature_levels
lowercase__: Optional[int] = encoder_n_points
lowercase__: Tuple = decoder_n_points
lowercase__: List[Any] = two_stage
lowercase__: Optional[Any] = two_stage_num_proposals
lowercase__: List[Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
lowercase__: List[str] = class_cost
lowercase__: Any = bbox_cost
lowercase__: Dict = giou_cost
# Loss coefficients
lowercase__: Union[str, Any] = mask_loss_coefficient
lowercase__: List[str] = dice_loss_coefficient
lowercase__: Any = bbox_loss_coefficient
lowercase__: List[Any] = giou_loss_coefficient
lowercase__: Tuple = eos_coefficient
lowercase__: int = focal_alpha
lowercase__: Dict = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def _snake_case ( self ):
return self.encoder_attention_heads
@property
def _snake_case ( self ):
return self.d_model
def _snake_case ( self ):
lowercase__: Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase__: List[str] = self.backbone_config.to_dict()
lowercase__: List[Any] = self.__class__.model_type
return output
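# Editor's hedged sketch, not part of the config file above: the last method
# deep-copies __dict__ and recurses into the nested backbone config when
# serializing. A stripped-down version of that idea, checked standalone:
import copy

class _TinyConfig:
    model_type = "tiny"

    def __init__(self, d_model=256, backbone=None):
        self.d_model = d_model
        self.backbone_config = backbone

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            # serialize the nested config rather than the object itself
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

assert _TinyConfig(backbone=_TinyConfig()).to_dict()["backbone_config"]["d_model"] == 256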
| 177 | """simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__A = False
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
lowercase__: Dict = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: List[str] = '''A painting of a squirrel eating a burger '''
lowercase__: str = torch.manual_seed(0 )
lowercase__: Union[str, Any] = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
lowercase__: Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = generator.manual_seed(0 )
lowercase__: List[str] = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _snake_case ( self ):
lowercase__: Dict = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Tuple = '''A painting of a squirrel eating a burger '''
lowercase__: Optional[Any] = torch.manual_seed(0 )
lowercase__: Tuple = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowercase__: Union[str, Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__: Any = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
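# Editor's hedged sketch, not part of the tests above: the save/reload test
# depends on a fixed seed producing identical draws before and after the
# round trip. The same determinism property, shown standalone with numpy:
import numpy as np

def _sample(seed: int) -> np.ndarray:
    return np.random.default_rng(seed).standard_normal(4)

assert np.abs(_sample(0) - _sample(0)).sum() < 1e-12  # same seed, same draw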
| 177 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : int = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class __lowercase (PretrainedConfig ):
"""simple docstring"""
_snake_case = """swinv2"""
_snake_case = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
    def __init__( self , image_size=2_2_4 , patch_size=4 , num_channels=3 , embed_dim=9_6 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=3_2 , **kwargs , ) -> Union[str, Any]:
        super().__init__(**kwargs )
snake_case : Union[str, Any] = image_size
snake_case : int = patch_size
snake_case : Optional[int] = num_channels
snake_case : Any = embed_dim
snake_case : List[Any] = depths
        snake_case : List[Any] = len(depths )
snake_case : Dict = num_heads
snake_case : Optional[int] = window_size
snake_case : Optional[Any] = mlp_ratio
snake_case : List[Any] = qkv_bias
snake_case : Tuple = hidden_dropout_prob
snake_case : List[Any] = attention_probs_dropout_prob
snake_case : Optional[int] = drop_path_rate
snake_case : str = hidden_act
snake_case : Any = use_absolute_embeddings
snake_case : Dict = layer_norm_eps
snake_case : Optional[Any] = initializer_range
snake_case : int = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
        snake_case : Dict = int(embed_dim * 2 ** (len(depths ) - 1) )
snake_case : int = (0, 0, 0, 0)
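# Editor's hedged sketch, not part of the config file above: the derived
# hidden size doubles the embedding dim once per stage transition, so with
# embed_dim=96 and four stages the channel dim after the last stage is
# 96 * 2**3 = 768. Checked standalone:
embed_dim, depths = 96, [2, 2, 6, 2]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 768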
| 176 |
def SCREAMING_SNAKE_CASE__ ( ) -> int:
    # concatenate 1, 2, 3, ... into the digit string of Champernowne's constant
    constant = []
    i = 1
    while len(constant) < 1E6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
| 176 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCAmelCase_ (PretrainedConfig ):
"""simple docstring"""
__UpperCamelCase : List[str] = '''wavlm'''
def __init__(self , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__="group" , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , SCREAMING_SNAKE_CASE__=(5, 2, 2, 2, 2, 2, 2) , SCREAMING_SNAKE_CASE__=(10, 3, 3, 3, 3, 2, 2) , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1_28 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3_20 , SCREAMING_SNAKE_CASE__=8_00 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.05 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=3_20 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=2_56 , SCREAMING_SNAKE_CASE__=2_56 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__="mean" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=2_56 , SCREAMING_SNAKE_CASE__=(5_12, 5_12, 5_12, 5_12, 15_00) , SCREAMING_SNAKE_CASE__=(5, 3, 3, 1, 1) , SCREAMING_SNAKE_CASE__=(1, 2, 3, 1, 1) , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=80 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = hidden_size
SCREAMING_SNAKE_CASE__ : Any = feat_extract_norm
SCREAMING_SNAKE_CASE__ : str = feat_extract_activation
SCREAMING_SNAKE_CASE__ : Any = list(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = list(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = list(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = conv_bias
SCREAMING_SNAKE_CASE__ : Dict = num_buckets
SCREAMING_SNAKE_CASE__ : List[str] = max_bucket_distance
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(self.conv_dim )
SCREAMING_SNAKE_CASE__ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout
SCREAMING_SNAKE_CASE__ : int = attention_dropout
SCREAMING_SNAKE_CASE__ : List[Any] = activation_dropout
SCREAMING_SNAKE_CASE__ : Tuple = feat_proj_dropout
SCREAMING_SNAKE_CASE__ : Any = final_dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = layerdrop
SCREAMING_SNAKE_CASE__ : int = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_ctc_classes
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_stable_layer_norm
SCREAMING_SNAKE_CASE__ : List[str] = use_weighted_layer_sum
SCREAMING_SNAKE_CASE__ : Optional[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_spec_augment
SCREAMING_SNAKE_CASE__ : List[str] = mask_time_prob
SCREAMING_SNAKE_CASE__ : Any = mask_time_length
SCREAMING_SNAKE_CASE__ : Tuple = mask_time_min_masks
SCREAMING_SNAKE_CASE__ : Optional[int] = mask_feature_prob
SCREAMING_SNAKE_CASE__ : List[str] = mask_feature_length
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE__ : Any = num_codevectors_per_group
SCREAMING_SNAKE_CASE__ : str = num_codevector_groups
SCREAMING_SNAKE_CASE__ : Tuple = contrastive_logits_temperature
SCREAMING_SNAKE_CASE__ : Any = num_negatives
SCREAMING_SNAKE_CASE__ : Dict = codevector_dim
SCREAMING_SNAKE_CASE__ : List[str] = proj_codevector_dim
SCREAMING_SNAKE_CASE__ : Union[str, Any] = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE__ : List[Any] = ctc_loss_reduction
SCREAMING_SNAKE_CASE__ : Tuple = ctc_zero_infinity
# adapter
SCREAMING_SNAKE_CASE__ : Dict = add_adapter
SCREAMING_SNAKE_CASE__ : List[str] = adapter_kernel_size
SCREAMING_SNAKE_CASE__ : Optional[int] = adapter_stride
SCREAMING_SNAKE_CASE__ : List[str] = num_adapter_layers
SCREAMING_SNAKE_CASE__ : List[str] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE__ : Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE__ : List[str] = list(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = list(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = xvector_output_dim
@property
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
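# Editor's hedged sketch, not part of the config file above: the property
# multiplies the conv strides together, giving the feature extractor's
# overall hop in input samples. With the default strides (5, 2, 2, 2, 2, 2, 2)
# one frame covers 5 * 2**6 = 320 samples. Checked standalone:
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320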
| 25 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = 384
SCREAMING_SNAKE_CASE__ : Tuple = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE__ : int = 96
SCREAMING_SNAKE_CASE__ : str = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE__ : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 96
SCREAMING_SNAKE_CASE__ : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : Tuple = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE__ : Tuple = 128
SCREAMING_SNAKE_CASE__ : List[Any] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE__ : Optional[int] = 12
SCREAMING_SNAKE_CASE__ : Optional[int] = 512
elif "large" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 192
SCREAMING_SNAKE_CASE__ : int = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE__ : List[Any] = 12
SCREAMING_SNAKE_CASE__ : Optional[Any] = 768
# set label information
SCREAMING_SNAKE_CASE__ : Optional[Any] = 150
SCREAMING_SNAKE_CASE__ : Tuple = """huggingface/label-files"""
SCREAMING_SNAKE_CASE__ : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="""dataset""" ) ,"""r""" ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : str = SwinConfig(
embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,)
SCREAMING_SNAKE_CASE__ : int = UperNetConfig(
backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,)
return config
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = dct.pop(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = val
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim :]
# fmt: on
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = x.shape
SCREAMING_SNAKE_CASE__ : List[Any] = x.reshape(_snake_case ,4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = x.shape
SCREAMING_SNAKE_CASE__ : Any = x.reshape(_snake_case ,in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : int = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case )
return x
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE__ : Optional[int] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ,file_name=_snake_case )[
"""state_dict"""
]
for name, param in state_dict.items():
print(_snake_case ,param.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_upernet_config(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = UperNetForSemanticSegmentation(_snake_case )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(_snake_case )
if "bn" in key:
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""bn""" ,"""batch_norm""" )
SCREAMING_SNAKE_CASE__ : Dict = val
# rename keys
SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case ,_snake_case ,_snake_case )
read_in_q_k_v(_snake_case ,config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = reverse_correct_unfold_reduction_order(_snake_case )
if "norm" in key:
SCREAMING_SNAKE_CASE__ : Tuple = reverse_correct_unfold_norm_order(_snake_case )
model.load_state_dict(_snake_case )
# verify on image
SCREAMING_SNAKE_CASE__ : List[str] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits
print(logits.shape )
print("""First values of logits:""" ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("""Logits:""" ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_snake_case )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
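# Editor's hedged sketch, not part of the conversion script above:
# read_in_q_k_v slices one fused qkv projection into query/key/value blocks
# of `dim` rows each. The same slicing on a toy tensor, checked standalone:
import torch

dim = 4
qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
assert torch.equal(torch.cat([q, k, v]), qkv)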
| 25 | 1 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {"""vocab_file""": """spiece.model"""}
__UpperCAmelCase = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
__UpperCAmelCase = {
"""google/bigbird-roberta-base""": 4_096,
"""google/bigbird-roberta-large""": 4_096,
"""google/bigbird-base-trivia-itc""": 4_096,
}
class SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
"""simple docstring"""
lowerCamelCase : Dict =VOCAB_FILES_NAMES
lowerCamelCase : List[str] =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str =["input_ids", "attention_mask"]
lowerCamelCase : List[int] =[]
def __init__( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : int="<unk>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : Dict="</s>" , lowerCAmelCase : Dict="<pad>" , lowerCAmelCase : Optional[int]="[SEP]" , lowerCAmelCase : Tuple="[MASK]" , lowerCAmelCase : Optional[int]="[CLS]" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : str , ) -> None:
"""simple docstring"""
__lowerCAmelCase : Any = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else bos_token
__lowerCAmelCase : Any = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else eos_token
__lowerCAmelCase : Optional[int] = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else unk_token
__lowerCAmelCase : Tuple = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else pad_token
__lowerCAmelCase : Optional[Any] = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else cls_token
__lowerCAmelCase : Optional[Any] = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
__lowerCAmelCase : Optional[int] = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
__lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , sep_token=lowerCAmelCase , mask_token=lowerCAmelCase , cls_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
__lowerCAmelCase : Any = vocab_file
__lowerCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
"""simple docstring"""
return self.sp_model.get_piece_size()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Any = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.__dict__.copy()
__lowerCAmelCase : Tuple = None
return state
def __setstate__( self : List[str] , lowerCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowerCAmelCase : Dict = {}
__lowerCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.sp_model.piece_to_id(lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.sp_model.IdToPiece(lowerCAmelCase )
return token
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = []
__lowerCAmelCase : Tuple = """"""
__lowerCAmelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase ) + token
__lowerCAmelCase : Dict = True
__lowerCAmelCase : Union[str, Any] = []
else:
current_sub_tokens.append(lowerCAmelCase )
__lowerCAmelCase : Dict = False
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : bool = False , lowerCAmelCase : bool = None , lowerCAmelCase : bool = True , **lowerCAmelCase : Union[str, Any] , ) -> str:
"""simple docstring"""
__lowerCAmelCase : int = kwargs.pop("""use_source_tokenizer""" , lowerCAmelCase )
__lowerCAmelCase : int = self.convert_ids_to_tokens(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__lowerCAmelCase : List[str] = []
__lowerCAmelCase : Dict = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase ) )
__lowerCAmelCase : List[str] = []
sub_texts.append(lowerCAmelCase )
else:
current_sub_text.append(lowerCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
__lowerCAmelCase : Union[str, Any] = re.sub(r""" (\[(MASK|SEP)\])""" , r"""\1""" , """ """.join(lowerCAmelCase ) )
else:
__lowerCAmelCase : Union[str, Any] = """""".join(lowerCAmelCase )
__lowerCAmelCase : Optional[int] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__lowerCAmelCase : List[str] = self.clean_up_tokenization(lowerCAmelCase )
return clean_text
else:
return text
def SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__lowerCAmelCase : Optional[int] = os.path.join(
lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , """wb""" ) as fi:
__lowerCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCAmelCase : List[Any] = [self.cls_token_id]
__lowerCAmelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase )) + [1]
return [1] + ([0] * len(lowerCAmelCase )) + [1] + ([0] * len(lowerCAmelCase )) + [1]
def SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = [self.sep_token_id]
__lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
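# Editor's hedged sketch, not part of the tokenizer above:
# build_inputs_with_special_tokens assembles [CLS] A [SEP] for one sequence
# and [CLS] A [SEP] B [SEP] for a pair. The same layout on plain id lists
# (101 and 102 are placeholder ids, not BigBird's real ones):
from typing import List, Optional

def _with_special(a: List[int], b: Optional[List[int]] = None, cls: int = 101, sep: int = 102) -> List[int]:
    if b is None:
        return [cls] + a + [sep]
    return [cls] + a + [sep] + b + [sep]

assert _with_special([7, 8], [9]) == [101, 7, 8, 102, 9, 102]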
| 139 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__UpperCAmelCase = logging.get_logger(__name__)
def snake_case_ (__A : List[str] , __A : str ) -> int:
__lowerCAmelCase : str = set()
__lowerCAmelCase : int = []
def parse_line(__A : List[Any] ):
for line in fp:
if isinstance(__A , __A ):
__lowerCAmelCase : str = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(__A ) > 0:
__lowerCAmelCase : Tuple = """\n""".join(__A )
# Only keep the warnings specified in `targets`
if any(f''': {x}: ''' in warning for x in targets ):
selected_warnings.add(__A )
buffer.clear()
continue
else:
__lowerCAmelCase : Optional[int] = line.strip()
buffer.append(__A )
if from_gh:
for filename in os.listdir(__A ):
__lowerCAmelCase : Optional[Any] = os.path.join(__A , __A )
if not os.path.isdir(__A ):
# read the file
if filename != "warnings.txt":
continue
with open(__A ) as fp:
parse_line(__A )
else:
try:
with zipfile.ZipFile(__A ) as z:
for filename in z.namelist():
if not os.path.isdir(__A ):
# read the file
if filename != "warnings.txt":
continue
with z.open(__A ) as fp:
parse_line(__A )
except Exception:
logger.warning(
f'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
return selected_warnings
def snake_case_ (__A : Dict , __A : Union[str, Any] ) -> Dict:
__lowerCAmelCase : Any = set()
__lowerCAmelCase : Optional[int] = [os.path.join(__A , __A ) for p in os.listdir(__A ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(__A , __A ) )
return selected_warnings
if __name__ == "__main__":
def snake_case_ (__A : int ) -> Tuple:
return values.split(""",""" )
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__UpperCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__UpperCAmelCase = extract_warnings(args.output_dir, args.targets)
__UpperCAmelCase = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
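# Editor's hedged sketch, not part of the script above: parse_line buffers
# indented continuation lines under the line that opens a warning. The same
# grouping idea on a toy log (a simplified variant, not the exact logic):
def _group_blocks(lines):
    blocks, buf = [], []
    for line in lines:
        if line.startswith(" ") and buf:
            buf.append(line.strip())  # continuation of the current block
        else:
            if buf:
                blocks.append("\n".join(buf))
            buf = [line.strip()]  # an unindented line starts a new block
    if buf:
        blocks.append("\n".join(buf))
    return blocks

assert _group_blocks(["warn A", "  detail", "warn B"]) == ["warn A\ndetail", "warn B"]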
| 139 | 1 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class a (pl.LightningModule ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase : Tuple ) -> List[Any]:
super().__init__()
__snake_case : Optional[Any] = model
__snake_case : Dict = 2
__snake_case : Optional[Any] = nn.Linear(self.model.config.hidden_size , self.num_labels )
def __snake_case ( self : Dict ) -> str:
pass
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
# load longformer model from model identifier
__snake_case : List[str] = LongformerModel.from_pretrained(__lowerCamelCase )
__snake_case : int = LightningModel(__lowerCamelCase )
__snake_case : List[Any] = torch.load(__lowerCamelCase , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
__snake_case : List[str] = LongformerForQuestionAnswering.from_pretrained(__lowerCamelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(__lowerCamelCase )
print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
_snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case : Union[str, Any] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
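# Editor's hedged sketch, not part of the conversion script above: the
# conversion moves weights between modules through state_dict round trips.
# The minimal form of that transfer, checked standalone:
import torch
from torch import nn

src, dst = nn.Linear(4, 2), nn.Linear(4, 2)
dst.load_state_dict(src.state_dict())  # copy all parameters across
assert torch.equal(src.weight, dst.weight)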
| 123 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_snake_case : Union[str, Any] = 0
_snake_case : List[str] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case : List[Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_snake_case : int = tuple[int, int]
class a :
"""simple docstring"""
def __init__( self : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : Node | None , ) -> None:
__snake_case : List[str] = pos_x
__snake_case : List[str] = pos_y
__snake_case : Dict = (pos_y, pos_x)
__snake_case : List[Any] = goal_x
__snake_case : Union[str, Any] = goal_y
__snake_case : int = g_cost
__snake_case : List[Any] = parent
__snake_case : Optional[Any] = self.calculate_heuristic()
__snake_case : Union[str, Any] = self.g_cost + self.h_cost
def __snake_case ( self : Optional[int] ) -> float:
__snake_case : Union[str, Any] = self.pos_x - self.goal_x
__snake_case : Tuple = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCamelCase ) + abs(lowerCamelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Optional[int] , lowerCamelCase : Node ) -> bool:
return self.f_cost < other.f_cost
class a :
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase : TPosition , lowerCamelCase : TPosition ) -> Optional[Any]:
__snake_case : Any = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase )
__snake_case : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , lowerCamelCase )
__snake_case : str = [self.start]
__snake_case : list[Node] = []
__snake_case : int = False
def __snake_case ( self : Tuple ) -> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__snake_case : Dict = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCamelCase )
self.closed_nodes.append(lowerCamelCase )
__snake_case : Tuple = self.get_successors(lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCamelCase )
else:
# retrieve the best current path
__snake_case : Any = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCamelCase )
else:
self.open_nodes.append(lowerCamelCase )
return [self.start.pos]
def __snake_case ( self : Optional[Any] , lowerCamelCase : Node ) -> list[Node]:
__snake_case : int = []
for action in delta:
__snake_case : Tuple = parent.pos_x + action[1]
__snake_case : Tuple = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCamelCase , lowerCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase , ) )
return successors
def __snake_case ( self : Optional[Any] , lowerCamelCase : Node | None ) -> list[TPosition]:
__snake_case : List[Any] = node
__snake_case : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__snake_case : Tuple = current_node.parent
path.reverse()
return path
class a :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase : TPosition , lowerCamelCase : TPosition ) -> None:
__snake_case : str = AStar(lowerCamelCase , lowerCamelCase )
__snake_case : int = AStar(lowerCamelCase , lowerCamelCase )
__snake_case : int = False
def __snake_case ( self : str ) -> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__snake_case : Optional[int] = self.fwd_astar.open_nodes.pop(0 )
__snake_case : str = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCamelCase , lowerCamelCase )
self.fwd_astar.closed_nodes.append(lowerCamelCase )
self.bwd_astar.closed_nodes.append(lowerCamelCase )
__snake_case : Optional[Any] = current_bwd_node
__snake_case : Any = current_fwd_node
__snake_case : int = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCamelCase )
else:
# retrieve the best current path
__snake_case : Optional[int] = astar.open_nodes.pop(
astar.open_nodes.index(lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCamelCase )
else:
astar.open_nodes.append(lowerCamelCase )
return [self.fwd_astar.start.pos]
def __snake_case ( self : Any , lowerCamelCase : Node , lowerCamelCase : Node ) -> list[TPosition]:
__snake_case : Optional[int] = self.fwd_astar.retrace_path(lowerCamelCase )
__snake_case : Optional[Any] = self.bwd_astar.retrace_path(lowerCamelCase )
bwd_path.pop()
bwd_path.reverse()
__snake_case : int = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_snake_case : Dict = (0, 0)
_snake_case : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_snake_case : List[Any] = time.time()
_snake_case : Dict = AStar(init, goal)
_snake_case : Optional[int] = a_star.search()
_snake_case : Optional[Any] = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
_snake_case : List[str] = time.time()
_snake_case : Any = BidirectionalAStar(init, goal)
_snake_case : List[str] = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
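# Editor's hedged sketch, not part of the module above: calculate_heuristic
# switches between Manhattan (|dx| + |dy|) and Euclidean (sqrt(dx^2 + dy^2))
# distance depending on HEURISTIC. Checked standalone on a 3-4-5 triangle:
from math import sqrt

dx, dy = 3, 4
assert abs(dx) + abs(dy) == 7        # Manhattan distance
assert sqrt(dx ** 2 + dy ** 2) == 5  # Euclidean distance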
| 123 | 1 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
_UpperCamelCase = pd.read_csv('''sample_data.csv''', header=None)
_UpperCamelCase = df.shape[:1][0]
# If you're using some other dataset input the target column
_UpperCamelCase = df.iloc[:, 1:2]
_UpperCamelCase = actual_data.values.reshape(len_data, 1)
_UpperCamelCase = MinMaxScaler().fit_transform(actual_data)
_UpperCamelCase = 10
_UpperCamelCase = 5
_UpperCamelCase = 20
_UpperCamelCase = len_data - periods * look_back
_UpperCamelCase = actual_data[:division]
_UpperCamelCase = actual_data[division - look_back :]
train_x, train_y = [], []
test_x, test_y = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
_UpperCamelCase = np.array(train_x)
_UpperCamelCase = np.array(test_x)
_UpperCamelCase = np.array([list(i.ravel()) for i in train_y])
_UpperCamelCase = np.array([list(i.ravel()) for i in test_y])
_UpperCamelCase = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
_UpperCamelCase = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
_UpperCamelCase = model.predict(x_test)
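# Editor's hedged sketch, not part of the script above: the two loops earlier
# cut the series into (look_back, forward_days) windows -- the model sees
# `look_back` steps and predicts the next `forward_days`. The same windowing
# on a toy list, checked standalone:
def _windows(series, look_back, forward_days):
    xs, ys = [], []
    for i in range(len(series) - look_back - forward_days + 1):
        xs.append(series[i : i + look_back])
        ys.append(series[i + look_back : i + look_back + forward_days])
    return xs, ys

xs, ys = _windows(list(range(6)), look_back=3, forward_days=2)
assert xs[0] == [0, 1, 2] and ys[0] == [3, 4]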
| 358 |
import unittest

from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST


class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4_096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
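
# A quick standalone smoke test mirroring the tester's tiny configuration
# (a sketch; the values follow MraModelTester above, not a pretrained checkpoint):
#
# from transformers import MraConfig, MraModel
# import torch
#
# config = MraConfig(vocab_size=99, hidden_size=16, num_hidden_layers=5,
#                    num_attention_heads=2, intermediate_size=36)
# model = MraModel(config).eval()
# with torch.no_grad():
#     out = model(torch.randint(0, 99, (2, 8)))
# print(out.last_hidden_state.shape)  # torch.Size([2, 8, 16])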
| 16 | 0 |
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
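
# Example invocation of the script above (hypothetical paths; adjust to your
# checkpoint layout):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert/pytorch_model.bin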
| 205 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
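
# Instantiation sketch (hypothetical hyperparameters chosen for illustration;
# not a pretrained configuration):
#
# encoder = SpectrogramNotesEncoder(
#     max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#     num_layers=12, num_heads=12, d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu",
# )
# tokens = torch.randint(0, 1536, (1, 2048))
# mask = torch.ones(1, 2048, dtype=torch.long)
# hidden, mask = encoder(tokens, mask)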
| 205 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
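
# The lazy-module pattern keeps `import transformers.models.reformer` cheap: a
# submodule such as `modeling_reformer` is imported only when one of its exported
# names is first accessed. A simplified sketch of the idea (an illustration, not
# the actual `_LazyModule` implementation):
import importlib


class _LazySketch:
    def __init__(self, pkg_name: str, import_structure: dict):
        self._pkg = pkg_name
        # Map each exported attribute to the submodule that defines it.
        self._where = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, name: str):
        # Import the owning submodule on first access, then delegate.
        module = importlib.import_module(f"{self._pkg}.{self._where[name]}")
        return getattr(module, name)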
| 291 |
"""simple docstring"""
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
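
if __name__ == "__main__":
    # Illustrative conversions (a sketch; the exponents come from METRIC_CONVERSION above):
    print(length_conversion(4, "meter", "kilometer"))  # 4 * 10**-3 = 0.004
    print(length_conversion(3, "kilometer", "meter"))  # 3 * 10**3 = 3000.0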
| 291 | 1 |
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
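
if __name__ == "__main__":
    # Example run on a hypothetical 4x4 maze (0 = open cell, 1 = blocked);
    # prints the 0/1 path grid when a route from (0, 0) to (3, 3) exists.
    example_maze = [
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [1, 0, 1, 0],
        [1, 0, 0, 0],
    ]
    solve_maze(example_maze)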
| 204 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
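
if __name__ == "__main__":
    # Quick self-check: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
    assert solution(15) == 26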
| 204 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 91 |
def nor_gate(input_1: int, input_2: int) -> int:
    """Output is 1 only when both inputs are 0, otherwise 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|       0 |       0 |    {nor_gate(0, 0)}   |")
    print(f"|       0 |       1 |    {nor_gate(0, 1)}   |")
    print(f"|       1 |       0 |    {nor_gate(1, 0)}   |")
    print(f"|       1 |       1 |    {nor_gate(1, 1)}   |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 91 | 1 |
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
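
if __name__ == "__main__":
    # Worked example of the mass-action law (n * p = n_i**2): with
    # electron_conc = 25 and intrinsic_conc = 5, the missing hole
    # concentration must be 5**2 / 25 = 1.0.
    print(carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=5))
    # -> ('hole_conc', 1.0)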
| 34 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
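
if __name__ == "__main__":
    # Example query on the graphs above: the forward route E -> B -> C -> D -> F
    # costs 4, while E -> G -> F costs 3, so the shortest distance is 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3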
| 34 | 1 |
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
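
if __name__ == "__main__":
    # The counts follow the Fibonacci sequence: 1, 2, 3, 5, 8, ...
    assert [climb_stairs(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]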
| 369 |
import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY


if is_torch_available():
    import torch


@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
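
# Outside the test harness the same pipeline is used directly; a small usage
# sketch (assumes the tiny random checkpoint above is reachable on the Hub):
#
# from transformers import pipeline
#
# generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
# print(generator("Something there", do_sample=False))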
| 54 | 0 |