code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1.0E4 , lowerCAmelCase_ = False , lowerCAmelCase_ = 1.0 , ):
    """Build sinusoidal position/timestep embeddings a la "Attention Is All You Need".

    NOTE(review): obfuscation damage — the signature repeats one parameter name
    (a SyntaxError as written) and the body reads names never bound here
    (``max_timescale``, ``min_timescale``, ``num_timescales``, ``freq_shift``,
    ``scale``, ``flip_sin_to_cos``, ``emb``, ``signal``). Presumably the real
    signature is (timesteps, embedding_dim, min_timescale=1, freq_shift=1,
    max_timescale=1e4, flip_sin_to_cos=False, scale=1.0) — confirm upstream.
    ``jnp.floataa`` likewise looks like a mangled ``jnp.float32``.
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    # Half the channels get sin, half get cos, so the dim must be even.
    assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
    _snake_case : Dict = float(embedding_dim // 2 )
    # Geometric progression of wavelengths between min_timescale and max_timescale.
    _snake_case : Tuple = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    _snake_case : Optional[Any] = min_timescale * jnp.exp(jnp.arange(lowerCAmelCase_ , dtype=jnp.floataa ) * -log_timescale_increment )
    # Outer product: (num_timesteps, 1) * (1, num_timescales) -> phase matrix.
    _snake_case : Tuple = jnp.expand_dims(lowerCAmelCase_ , 1 ) * jnp.expand_dims(lowerCAmelCase_ , 0 )
    # scale embeddings
    _snake_case : Union[str, Any] = scale * emb
    if flip_sin_to_cos:
        # cos half first, then sin half.
        _snake_case : str = jnp.concatenate([jnp.cos(lowerCAmelCase_ ), jnp.sin(lowerCAmelCase_ )] , axis=1 )
    else:
        _snake_case : Union[str, Any] = jnp.concatenate([jnp.sin(lowerCAmelCase_ ), jnp.cos(lowerCAmelCase_ )] , axis=1 )
    # Final shape: (num_timesteps, embedding_dim).
    _snake_case : str = jnp.reshape(lowerCAmelCase_ , [jnp.shape(lowerCAmelCase_ )[0], embedding_dim] )
    return signal
class lowerCamelCase (nn.Module ):
    """Two-layer MLP (Dense -> SiLU -> Dense) that projects a timestep embedding.

    NOTE(review): both fields are bound to the same mangled name ``_lowercase``;
    upstream these are presumably ``time_embed_dim: int = 32`` and
    ``dtype: jnp.dtype = jnp.float32`` (``jnp.floataa`` looks mangled) — the
    method body reads ``self.time_embed_dim`` / ``self.dtype``.
    """

    # presumably time_embed_dim: width of both Dense layers — TODO confirm
    _lowercase : int = 32
    # presumably the compute dtype of the Dense layers
    _lowercase : jnp.dtype = jnp.floataa

    @nn.compact
    def __call__( self , lowercase__ ) -> Any:
        """Apply linear_1 -> SiLU -> linear_2 to the input embedding."""
        _snake_case : Union[str, Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(lowercase__ )
        _snake_case : List[Any] = nn.silu(lowercase__ )
        _snake_case : List[Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(lowercase__ )
        return temb
class lowerCamelCase (nn.Module ):
    """Flax module mapping a 1-D array of timesteps to sinusoidal embeddings.

    NOTE(review): all three fields are bound to the mangled name ``_lowercase``;
    the call body reads ``self.dim`` / ``self.flip_sin_to_cos`` /
    ``self.freq_shift``, and calls ``get_sinusoidal_embeddings`` which this file
    defines under the mangled name ``_a`` — confirm against upstream.
    """

    # presumably dim: output embedding dimension — TODO confirm
    _lowercase : int = 32
    # presumably flip_sin_to_cos: order of the sin/cos halves
    _lowercase : bool = False
    # presumably freq_shift: frequency-shift term of the timescale schedule
    _lowercase : float = 1

    @nn.compact
    def __call__( self , lowercase__ ) -> Optional[int]:
        """Return sinusoidal embeddings of shape (len(timesteps), dim)."""
        return get_sinusoidal_embeddings(
            lowercase__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 703
|
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def _a ( ):
    """Entry point for the ``transformers-cli`` command-line tool.

    Builds the top-level :class:`argparse.ArgumentParser`, registers every
    sub-command on its subparsers, parses ``sys.argv`` and dispatches to the
    selected command. Exits with status 1 when no sub-command was given.

    Fix: the original assigned the parser/subparsers/args/service to the
    throwaway name ``_snake_case`` and then read the undefined names
    ``lowerCAmelCase_``, ``args`` and ``service`` — a guaranteed ``NameError``.
    The locals are now named and threaded through consistently.
    """
    parser = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''transformers-cli command helpers''' )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        # No sub-command selected: show usage and exit with an error code.
        parser.print_help()
        exit(1 )
    # Run: each registered command sets ``func`` to a factory building the
    # command object from the parsed args.
    service = args.func(args)
    service.run()
if __name__ == "__main__":
    # Fix: the entry point in this file is named ``_a``; ``main`` is undefined
    # and running the script raised NameError before any work was done.
    _a()
| 47
| 0
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
    """Decorator factory selecting eager vs. graph (optionally XLA) execution.

    NOTE(review): obfuscation damage — the two parameters share one name (a
    SyntaxError as written) and the bodies read the unbound names ``func``,
    ``do_eager_mode`` and ``use_xla``; upstream the signature is presumably
    (do_eager_mode, use_xla) — confirm.
    """
    def run_func(lowerCAmelCase_ ):
        @wraps(lowerCAmelCase_ )
        def run_in_eager_mode(*lowerCAmelCase_ , **lowerCAmelCase_ ):
            # Plain Python call: runs the wrapped function eagerly.
            return func(*lowerCAmelCase_ , **lowerCAmelCase_ )

        @wraps(lowerCAmelCase_ )
        @tf.function(experimental_compile=lowerCAmelCase_ )
        def run_in_graph_mode(*lowerCAmelCase_ , **lowerCAmelCase_ ):
            # Traced into a tf.function; XLA compile toggled by the decorator arg.
            return func(*lowerCAmelCase_ , **lowerCAmelCase_ )

        if do_eager_mode is True:
            if use_xla is not False:
                # XLA only applies to graph mode, so the combination is rejected.
                raise ValueError(
                    '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """Build a deterministic-per-call random int32 tensor of token ids.

    NOTE(review): obfuscation damage — three identical parameter names (a
    SyntaxError as written); the body reads ``vocab_size``, ``batch_size`` and
    ``sequence_length``, presumably the real parameters. ``tf.intaa`` looks
    like a mangled ``tf.int32``.
    """
    _snake_case : List[Any] = random.Random()
    # One id per (batch, position), each uniform in [0, vocab_size).
    _snake_case : Union[str, Any] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(lowerCAmelCase_ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class lowerCamelCase (a__ ):
    """TensorFlow benchmark runner: measures inference/training speed and memory.

    NOTE(review): heavy obfuscation damage throughout — both class fields are
    bound to ``_lowercase``, every method to ``UpperCAmelCase_`` (later defs
    shadow earlier ones), and method bodies assign to ``_snake_case`` while
    reading the intended names (``strategy``, ``_inference``, ``_train``,
    ``config``, ``model``, ``runtimes``, ``memory``, ``summary``, ...).
    Upstream these are presumably ``framework_version``, ``_inference_speed``,
    ``_train_speed``, ``_inference_memory``, ``_train_memory``,
    ``_prepare_inference_func``, ``_prepare_train_func``, ``_measure_speed``
    and ``_measure_memory`` — confirm before relying on any of it.
    """

    # presumably args: TensorFlowBenchmarkArguments for this run
    _lowercase : TensorFlowBenchmarkArguments
    # presumably configs: mapping model_name -> PretrainedConfig
    _lowercase : PretrainedConfig
    # framework label reported in results
    _lowercase : str = "TensorFlow"

    @property
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        """Version string of the installed TensorFlow."""
        return tf.__version__

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> float:
        """Measure inference speed for (model_name, batch_size, sequence_length)."""
        _snake_case : Optional[Any] = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
        _snake_case : Optional[Any] = self._prepare_inference_func(lowercase__ , lowercase__ , lowercase__ )
        return self._measure_speed(_inference )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> float:
        """Measure training-step speed for (model_name, batch_size, sequence_length)."""
        _snake_case : Dict = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
        _snake_case : Tuple = self._prepare_train_func(lowercase__ , lowercase__ , lowercase__ )
        return self._measure_speed(_train )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> [Memory, Optional[MemorySummary]]:
        """Measure inference memory; enables GPU memory growth first when on GPU."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowercase__ )
        _snake_case : Optional[int] = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
        _snake_case : List[Any] = self._prepare_inference_func(lowercase__ , lowercase__ , lowercase__ )
        return self._measure_memory(_inference )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> [Memory, Optional[MemorySummary]]:
        """Measure training-step memory; enables GPU memory growth first when on GPU."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowercase__ )
        _snake_case : Union[str, Any] = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
        _snake_case : List[str] = self._prepare_train_func(lowercase__ , lowercase__ , lowercase__ )
        return self._measure_memory(_train )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> Callable[[], None]:
        """Build and return a zero-arg closure that runs one forward pass."""
        _snake_case : Optional[int] = self.config_dict[model_name]
        if self.args.fpaa:
            # fpaa: presumably the mangled name of the fp16 flag — TODO confirm
            raise NotImplementedError('''Mixed precision is currently not supported.''' )
        # True when the config names at least one concrete architecture class.
        _snake_case : List[Any] = (
            hasattr(lowercase__ , '''architectures''' )
            and isinstance(config.architectures , lowercase__ )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                _snake_case : int = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
                _snake_case : Dict = __import__('''transformers''' , fromlist=[model_class] )
                _snake_case : List[Any] = getattr(lowercase__ , lowercase__ )
                _snake_case : Tuple = model_cls(lowercase__ )
            except ImportError:
                raise ImportError(
                    F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
        else:
            # Fall back to the bare (headless) auto-mapped model class.
            _snake_case : Union[str, Any] = TF_MODEL_MAPPING[config.__class__](lowercase__ )
        # encoder-decoder has vocab size saved differently
        _snake_case : Optional[Any] = config.vocab_size if hasattr(lowercase__ , '''vocab_size''' ) else config.encoder.vocab_size
        _snake_case : List[str] = random_input_ids(lowercase__ , lowercase__ , lowercase__ )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_forward():
            return model(lowercase__ , decoder_input_ids=lowercase__ , training=lowercase__ )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_forward():
            return model(lowercase__ , training=lowercase__ )

        _snake_case : List[Any] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> Callable[[], None]:
        """Build and return a zero-arg closure that runs one forward+backward pass."""
        _snake_case : int = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
        if self.args.fpaa:
            raise NotImplementedError('''Mixed precision is currently not supported.''' )
        _snake_case : List[str] = (
            hasattr(lowercase__ , '''architectures''' )
            and isinstance(config.architectures , lowercase__ )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                _snake_case : Dict = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
                _snake_case : List[Any] = __import__('''transformers''' , fromlist=[model_class] )
                _snake_case : str = getattr(lowercase__ , lowercase__ )
                _snake_case : Optional[int] = model_cls(lowercase__ )
            except ImportError:
                raise ImportError(
                    F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
        else:
            # Training needs a head, hence the LM-head auto mapping here.
            _snake_case : Any = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowercase__ )
        # encoder-decoder has vocab size saved differently
        _snake_case : Union[str, Any] = config.vocab_size if hasattr(lowercase__ , '''vocab_size''' ) else config.encoder.vocab_size
        _snake_case : Any = random_input_ids(lowercase__ , lowercase__ , lowercase__ )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_train():
            # Loss is output [0] when labels are passed; gradients force backprop.
            _snake_case : Optional[int] = model(lowercase__ , decoder_input_ids=lowercase__ , labels=lowercase__ , training=lowercase__ )[0]
            _snake_case : List[str] = tf.gradients(lowercase__ , model.trainable_variables )
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_train():
            _snake_case : List[str] = model(lowercase__ , labels=lowercase__ , training=lowercase__ )[0]
            _snake_case : Optional[int] = tf.gradients(lowercase__ , model.trainable_variables )
            return gradients

        _snake_case : List[Any] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def UpperCAmelCase_ ( self , lowercase__ ) -> float:
        """Time the closure with timeit inside the device strategy scope.

        Returns min(total of 10 runs) / 10, i.e. best average per run.
        """
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
                    timeit.repeat(lowercase__ , repeat=1 , number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                _snake_case : Tuple = timeit.repeat(
                    lowercase__ , repeat=self.args.repeat , number=10 , )
                return min(lowercase__ ) / 10.0
            except ResourceExhaustedError as e:
                # OOM is reported, not raised, so the benchmark sweep can continue.
                self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )

    def UpperCAmelCase_ ( self , lowercase__ ) -> [Memory, MemorySummary]:
        """Run the closure once and report peak memory (NVML on GPU, psutil-style
        peak on CPU) plus an optional line-by-line trace summary."""
        logger.info(
            '''Note that TensorFlow allocates more memory than '''
            '''it might need to speed up computation. '''
            '''The memory reported here corresponds to the memory '''
            '''reported by `nvidia-smi`, which can vary depending '''
            '''on total available memory on the GPU that is used.''' )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
                            ''' consumption line by line.''' )
                    _snake_case : List[Any] = start_memory_tracing('''transformers''' )
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
                        ''' with `args.memory=False`''' )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            '''py3nvml not installed, we won\'t log GPU memory usage. '''
                            '''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
                        _snake_case : str = '''N/A'''
                    else:
                        logger.info(
                            '''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
                            ''' running on the same GPU.''' )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        _snake_case : Optional[int] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        _snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(lowercase__ )
                        _snake_case : Any = meminfo.used
                        _snake_case : str = Memory(lowercase__ )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
                            ''' TensorFlow.''' )
                        _snake_case : int = None
                    else:
                        _snake_case : Optional[int] = measure_peak_memory_cpu(lowercase__ )
                        _snake_case : List[str] = Memory(lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    _snake_case : Optional[int] = stop_memory_tracing(lowercase__ )
                    if memory is None:
                        # With tracing on CPU, total traced memory stands in for peak.
                        _snake_case : Any = summary.total
                else:
                    _snake_case : Dict = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
                return "N/A", None
| 704
|
'''simple docstring'''
from collections.abc import Generator
def _a ( ):
"""simple docstring"""
_snake_case , _snake_case : Union[str, Any] = 0, 1
while True:
_snake_case , _snake_case : List[str] = b, a + b
yield b
def _a ( lowerCAmelCase_ = 1_000 ):
"""simple docstring"""
_snake_case : List[str] = 1
_snake_case : Dict = fibonacci_generator()
while len(str(next(lowerCAmelCase_ ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
    # Fix: this file defines the solver under the mangled name ``_a``;
    # ``solution`` was undefined and the script raised NameError.
    print(_a(int(str(input()).strip())))
| 47
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCAmelCase : Any = TypeVar('T')
UpperCAmelCase : str = TypeVar('U')
class lowerCamelCase (Generic[T, U] ):
    """Node of a doubly linked list holding one key/value pair.

    NOTE(review): obfuscation damage — ``__init__`` binds throwaway locals
    (``_snake_case``) instead of the ``self.key`` / ``self.val`` /
    ``self.next`` / ``self.prev`` attributes that ``__repr__`` reads;
    upstream the constructor presumably sets those four attributes.
    """

    def __init__( self , lowercase__ , lowercase__ ) -> List[Any]:
        """Store key and value; next/prev start as None (unlinked node)."""
        _snake_case : str = key
        _snake_case : Optional[int] = val
        _snake_case : DoubleLinkedListNode[T, U] | None = None
        _snake_case : DoubleLinkedListNode[T, U] | None = None

    def __repr__( self ) -> str:
        """Debug representation: key, value, and whether neighbours exist."""
        return (
            F'''Node: key: {self.key}, val: {self.val}, '''
            F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
        )
class lowerCamelCase (Generic[T, U] ):
    """Doubly linked list bracketed by sentinel head/rear nodes.

    NOTE(review): obfuscation damage — assignments target ``_snake_case``
    rather than ``self.head`` / ``self.rear`` (and the node-link attributes),
    and the sentinel constructor args are the unbound ``lowercase__``
    (upstream: ``None, None``). Documented per the apparent upstream intent.
    """

    def __init__( self ) -> None:
        """Create the empty list: head and rear sentinels linked to each other."""
        _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ )
        _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ )
        # Upstream: head.next, rear.prev = rear, head — wires the two sentinels.
        _snake_case : Union[str, Any] = self.rear, self.head

    def __repr__( self ) -> str:
        """Multi-line dump of every node from head to rear."""
        _snake_case : List[Any] = ['''DoubleLinkedList''']
        _snake_case : str = self.head
        while node.next is not None:
            rep.append(str(lowercase__ ) )
            _snake_case : List[str] = node.next
        rep.append(str(self.rear ) )
        return ",\n    ".join(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> None:
        """Insert the given node just before the rear sentinel (most recent end)."""
        _snake_case : Tuple = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        # Splice: previous <-> node <-> rear.
        _snake_case : Union[str, Any] = node
        _snake_case : Optional[Any] = previous
        _snake_case : int = node
        _snake_case : Union[str, Any] = self.rear

    def UpperCAmelCase_ ( self , lowercase__ ) -> DoubleLinkedListNode[T, U] | None:
        """Unlink and return the node; None when the node is not linked in."""
        if node.prev is None or node.next is None:
            return None
        # Bridge the neighbours, then clear this node's links.
        _snake_case : Optional[int] = node.next
        _snake_case : Any = node.prev
        _snake_case : List[str] = None
        _snake_case : Optional[int] = None
        return node
class lowerCamelCase (Generic[T, U] ):
    """LRU cache built on the doubly linked list above, plus a decorator factory.

    Recency order lives in the list (head = oldest, rear = newest); the dict
    maps keys to their list nodes for O(1) lookup.

    NOTE(review): obfuscation damage — attribute/local assignments target
    ``_snake_case``, so the names read later (``self.list``, ``self.capacity``,
    ``self.num_keys``, ``self.hits``, ``self.miss``, ``self.cache``, ``node``,
    ``result``) are never actually bound here; documented per upstream intent.
    """

    # Shared registry: decorated function -> its dedicated cache instance.
    _lowercase : dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__( self , lowercase__ ) -> Union[str, Any]:
        """Create an empty cache bounded to ``capacity`` entries."""
        _snake_case : DoubleLinkedList[T, U] = DoubleLinkedList()
        _snake_case : Union[str, Any] = capacity
        _snake_case : int = 0
        _snake_case : Dict = 0
        _snake_case : Union[str, Any] = 0
        _snake_case : dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__( self ) -> str:
        """Cache statistics: hits, misses, capacity and current size."""
        return (
            F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
            F'''capacity={self.capacity}, current size={self.num_keys})'''
        )

    def __contains__( self , lowercase__ ) -> bool:
        """True when the key is currently cached."""
        return key in self.cache

    def UpperCAmelCase_ ( self , lowercase__ ) -> U | None:
        """Return the cached value and mark the entry most-recently used.

        A miss returns None and bumps the miss counter.
        """
        if key in self.cache:
            self.hits += 1
            _snake_case : DoubleLinkedListNode[T, U] = self.cache[key]
            # Move-to-rear: remove then re-add makes the entry newest.
            _snake_case : Tuple = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(lowercase__ )
            return node.val
        self.miss += 1
        return None

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None:
        """Insert or update a key, evicting the least-recently-used entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                _snake_case : Dict = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(lowercase__ ) is not None
                )  # node guaranteed to be in list assert node.key is not None
                del self.cache[first_node.key]
                self.num_keys -= 1
            _snake_case : Optional[int] = DoubleLinkedListNode(lowercase__ , lowercase__ )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            _snake_case : Optional[Any] = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            _snake_case : Optional[Any] = value
            self.list.add(lowercase__ )

    @classmethod
    def UpperCAmelCase_ ( cls , lowercase__ = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator factory: memoize a one-argument function in an LRU cache
        of the given size; the wrapper gains a ``cache_info`` accessor."""
        def cache_decorator_inner(lowercase__ ) -> Callable[..., U]:
            def cache_decorator_wrapper(*lowercase__ ) -> U:
                # Lazily create one cache per decorated function.
                if func not in cls.decorator_function_to_instance_map:
                    _snake_case : Optional[Any] = LRUCache(lowercase__ )
                _snake_case : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    # Miss: compute and store keyed by the first positional arg.
                    _snake_case : Tuple = func(*lowercase__ )
                    cls.decorator_function_to_instance_map[func].put(args[0] , lowercase__ )
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(lowercase__ , '''cache_info''' , lowercase__ )  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 705
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
UpperCAmelCase : str = logging.getLogger(__name__)
UpperCAmelCase : Dict = 5_0 # max width of layer names
UpperCAmelCase : Union[str, Any] = 7_0 # max width of quantizer names
def _a ( lowerCAmelCase_ ):
    """Register the quant_trainer command-line options on an argparse parser.

    NOTE(review): obfuscation damage — the body reads ``parser`` and ``group``
    but the parameter is ``lowerCAmelCase_`` and the group is assigned to
    ``_snake_case``; the ``type=lowerCAmelCase_`` arguments presumably were
    ``type=int`` / ``type=str`` / ``type=float`` upstream — confirm.
    """
    _snake_case : Dict = parser.add_argument_group('''quant_trainer arguments''' )
    group.add_argument('''--wprec''' , type=lowerCAmelCase_ , default=8 , help='''weight precision''' )
    group.add_argument('''--aprec''' , type=lowerCAmelCase_ , default=8 , help='''activation precision''' )
    group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
    group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
    group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
    group.add_argument('''--quant-disable-keyword''' , type=lowerCAmelCase_ , nargs='''+''' , help='''disable quantizers by keyword''' )
    group.add_argument('''--quant-disable-layer-module''' , type=lowerCAmelCase_ , help='''disable quantizers by keyword under layer.''' )
    group.add_argument('''--quant-enable-layer-module''' , type=lowerCAmelCase_ , help='''enable quantizers by keyword under layer''' )
    group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
    group.add_argument('''--percentile''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''percentile for PercentileCalibrator''' )
    group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
    group.add_argument('''--clip-gelu''' , metavar='''N''' , type=lowerCAmelCase_ , help='''clip gelu output maximum value to N''' )
    group.add_argument(
        '''--recalibrate-weights''' , action='''store_true''' , help=(
            '''recalibrate weight amaxes by taking the max of the weights.'''
            ''' amaxes will be computed with the current quantization granularity (axis).'''
        ) , )
def _a ( lowerCAmelCase_ ):
    """Install default quantization descriptors on QuantLinear from CLI args.

    Maps the requested calibrator name to a calib method ("max" or
    "histogram") and configures input/weight QuantDescriptors.

    NOTE(review): obfuscation damage — the body reads ``args`` but the
    parameter is ``lowerCAmelCase_``; the calib-method string assigned to
    ``_snake_case`` is likewise never actually passed on.
    """
    if args.calibrator == "max":
        _snake_case : Optional[int] = '''max'''
    elif args.calibrator == "percentile":
        # Percentile calibration needs an explicit percentile value.
        if args.percentile is None:
            raise ValueError('''Specify --percentile when using percentile calibrator''' )
        _snake_case : Tuple = '''histogram'''
    elif args.calibrator == "mse":
        _snake_case : int = '''histogram'''
    else:
        raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
    _snake_case : Tuple = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCAmelCase_ )
    # Per-tensor scaling when requested; otherwise per-output-channel (axis 0).
    _snake_case : str = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(lowerCAmelCase_ )
    quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ):
    """Apply the CLI quantization configuration to a model.

    NOTE(review): obfuscation damage — the four parameters share one name (a
    SyntaxError as written); upstream the signature is presumably
    (model, args, calib=False, eval=False), and the body reads ``args`` /
    ``calib`` accordingly.
    """
    logger.info('''Configuring Model for Quantization''' )
    logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
    if not calib:
        # Post-calibration configuration: selectively disable/enable quantizers.
        if args.quant_disable_embeddings:
            set_quantizer_by_name(lowerCAmelCase_ , ['''embeddings'''] , which='''weight''' , _disabled=lowerCAmelCase_ )
        if args.quant_disable:
            set_quantizer_by_name(lowerCAmelCase_ , [''''''] , _disabled=lowerCAmelCase_ )
        if args.quant_disable_keyword:
            set_quantizer_by_name(lowerCAmelCase_ , args.quant_disable_keyword , _disabled=lowerCAmelCase_ )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=lowerCAmelCase_ )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=lowerCAmelCase_ )
        if args.recalibrate_weights:
            recalibrate_weights(lowerCAmelCase_ )
        if args.fuse_qkv:
            fuse_qkv(lowerCAmelCase_ , lowerCAmelCase_ )
    if args.clip_gelu:
        clip_gelu(lowerCAmelCase_ , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ ):
    """Switch every TensorQuantizer in the model into calibration mode.

    Quantization is disabled and calibration enabled per quantizer; modules
    without a calibrator are disabled outright.

    NOTE(review): obfuscation damage — the body reads ``model`` but the
    parameter is ``lowerCAmelCase_``.
    """
    logger.info('''Enabling Calibration''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f'''{name:80}: {module}''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
    """Load calibrated amax values and flip quantizers back to quantize mode.

    NOTE(review): obfuscation damage — the two parameters share one name (a
    SyntaxError as written); upstream the signature is presumably
    (model, args), matching the ``model`` / ``args`` names read in the body.
    """
    logger.info('''Loading calibrated amax''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    # Histogram-based calibrators take the percentile strategy.
                    module.load_calib_amax('''percentile''' , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
    """Share one amax scale across the q/k/v quantizers of each attention block.

    NOTE(review): obfuscation damage — duplicate parameter names (SyntaxError
    as written) and the nested helper reads ``qq``/``qk``/``qv`` while its
    parameters are mangled; upstream this is fuse_qkv(model, args).
    """
    def fusea(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
        # Bail out unless all three quantizers have a calibrated amax buffer.
        for mod in [qq, qk, qv]:
            if not hasattr(lowerCAmelCase_ , '''_amax''' ):
                print(''' WARNING: NO AMAX BUFFER''' )
                return
        _snake_case : Tuple = qq._amax.detach().item()
        _snake_case : Tuple = qk._amax.detach().item()
        _snake_case : List[Any] = qv._amax.detach().item()
        # The shared scale is the max of the three, written back in place.
        _snake_case : List[str] = max(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
        qq._amax.fill_(lowerCAmelCase_ )
        qk._amax.fill_(lowerCAmelCase_ )
        qv._amax.fill_(lowerCAmelCase_ )
        logger.info(f'''          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )

    for name, mod in model.named_modules():
        if name.endswith('''.attention.self''' ):
            logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
            fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                # Per-tensor weight quantizers can share a scale too.
                fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
    """Clamp the input-quantizer amax of GELU-following dense layers to a cap.

    NOTE(review): obfuscation damage — duplicate parameter names (SyntaxError
    as written); upstream this is clip_gelu(model, maxval).
    """
    for name, mod in model.named_modules():
        # Only FFN output denses; attention output denses are excluded.
        if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
            _snake_case : List[Any] = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=lowerCAmelCase_ )
            _snake_case : List[str] = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def _a ( lowerCAmelCase_ ):
    """Broadcast each per-axis weight-quantizer amax to one value per channel.

    NOTE(review): obfuscation damage — the body reads ``model`` but the
    parameter is ``lowerCAmelCase_``, and the expanded tensor assigned to
    ``_snake_case`` is never written back to the quantizer.
    """
    for name, mod in model.named_modules():
        if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
            _snake_case : Dict = mod.weight.shape[0]
            _snake_case : Optional[int] = mod._weight_quantizer._amax.detach()
            # One amax entry per output channel (weight.shape[0]).
            _snake_case : Optional[int] = torch.ones(lowerCAmelCase_ , dtype=amax.dtype , device=amax.device ) * amax
            print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def _a ( lowerCAmelCase_ ):
    """Recompute each weight quantizer's amax from the current weights.

    Reduces over every axis except the quantization axis.

    NOTE(review): obfuscation damage — the body reads ``model`` but the
    parameter is ``lowerCAmelCase_``; the ``print`` string below is missing an
    f-prefix, so the placeholders are printed literally — presumably a bug.
    """
    for name, mod in model.named_modules():
        if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ):
            if not hasattr(mod.weight_quantizer , '''_amax''' ):
                print('''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            _snake_case : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            _snake_case : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set
            _snake_case : Optional[int] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCAmelCase_ , keepdims=lowerCAmelCase_ ).detach()
            logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            _snake_case : Tuple = amax
def _a ( lowerCAmelCase_ , lowerCAmelCase_=25 , lowerCAmelCase_=180 , lowerCAmelCase_=None ):
    """Log a per-module summary of input/weight quantizer configurations.

    NOTE(review): obfuscation damage — all four parameters share one name (a
    SyntaxError as written); upstream the signature is presumably
    (model, name_width=25, line_width=180, ignore=None), matching the
    ``model`` / ``name_width`` / ``line_width`` / ``ignore`` reads below.
    """
    if ignore is None:
        _snake_case : Dict = []
    elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
        # A single ignore entry is promoted to a one-element list.
        _snake_case : Optional[int] = [ignore]
    _snake_case : str = 0
    # First pass: find the widest module name for column alignment.
    for name, mod in model.named_modules():
        if not hasattr(lowerCAmelCase_ , '''weight''' ):
            continue
        _snake_case : Optional[int] = max(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
    # Second pass: emit one (possibly wrapped) line per quantized module.
    for name, mod in model.named_modules():
        _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ )
        _snake_case : Tuple = getattr(lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ )
        if not hasattr(lowerCAmelCase_ , '''weight''' ):
            continue
        if type(lowerCAmelCase_ ) in ignore:
            continue
        # Also skip when any string entry of ``ignore`` occurs in the name.
        if [True for s in ignore if type(lowerCAmelCase_ ) is str and s in name]:
            continue
        _snake_case : Optional[int] = f'''Act:{input_q.extra_repr()}'''
        _snake_case : Any = f'''Wgt:{weight_q.extra_repr()}'''
        _snake_case : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(lowerCAmelCase_ ) <= line_width:
            logger.info(lowerCAmelCase_ )
        else:
            # Too long for one line: split activation and weight details.
            logger.info(f'''{name:{name_width}} {act_str}''' )
            logger.info(f'''{" ":{name_width}} {wgt_str}''' )
def _a ( lowerCAmelCase_ ):
    """Print every TensorQuantizer module in the model and a final count.

    NOTE(review): obfuscation damage — the body reads ``model`` and ``count``
    but the parameter is ``lowerCAmelCase_`` and the counter is assigned to
    ``_snake_case``.
    """
    _snake_case : str = 0
    for name, mod in model.named_modules():
        if isinstance(lowerCAmelCase_ , pytorch_quantization.nn.TensorQuantizer ):
            print(f'''{name:80} {mod}''' )
            count += 1
    print(f'''{count} TensorQuantizers found in model''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """Set one attribute on a named quantizer of a module, warning if absent.

    NOTE(review): obfuscation damage — five identical parameter names (a
    SyntaxError as written); upstream the signature is presumably
    (name, mod, quantizer, k, v).
    """
    _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    if quantizer_mod is not None:
        # The attribute must already exist on the quantizer before overriding.
        assert hasattr(lowerCAmelCase_ , lowerCAmelCase_ )
        setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    else:
        logger.warning(f'''{name} has no {quantizer}''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="both" , **lowerCAmelCase_ ):
    """Apply keyword overrides to a module's input and/or weight quantizer.

    ``which`` selects "input", "weight" or "both".

    NOTE(review): obfuscation damage — duplicate positional parameter names (a
    SyntaxError as written); upstream the signature is presumably
    (name, mod, which="both", **kwargs).
    """
    _snake_case : Optional[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
    for k, v in kwargs.items():
        s += f''' {k}={v}'''
        if which in ["input", "both"]:
            set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ )
        if which in ["weight", "both"]:
            set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ )
    logger.info(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ):
    """Apply quantizer overrides to every module whose name matches a pattern.

    Modules owning quantizers are delegated to ``set_quantizers``; modules that
    ARE quantizers (name endswith ``_quantizer``) get the attributes directly.

    NOTE(review): obfuscation damage — duplicate parameter names (a
    SyntaxError as written); upstream the signature is presumably
    (model, names, **kwargs), with ``names`` a list of regex patterns.
    """
    for name, mod in model.named_modules():
        if hasattr(lowerCAmelCase_ , '''_input_quantizer''' ) or hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ):
            for n in names:
                if re.search(lowerCAmelCase_ , lowerCAmelCase_ ):
                    set_quantizers(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
        elif name.endswith('''_quantizer''' ):
            for n in names:
                if re.search(lowerCAmelCase_ , lowerCAmelCase_ ):
                    _snake_case : Any = f'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += f''' {k}={v}'''
                    setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
                    logger.info(lowerCAmelCase_ )
| 47
| 0
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module-level logger for the conversion script.
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
# Mapping from fairseq state-dict key fragments to HF Hubert module paths;
# '*' is a wildcard for the encoder layer index, filled in during conversion.
# NOTE(review): code further down iterates ``MAPPING.items()`` but this dict
# is bound to ``UpperCAmelCase`` (obfuscation artifact) — confirm the
# intended constant name is ``MAPPING``.
UpperCAmelCase : Optional[Any] = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy one fairseq tensor into the HF module addressed by the dotted ``key``.

    Args:
        hf_pointer: root HF module to descend into.
        key: dotted attribute path, e.g. ``encoder.layers.0.attention.k_proj``.
        value: source tensor from the fairseq state dict.
        full_name: original fairseq parameter name (for logging/assert messages).
        weight_type: one of ``"weight"``/``"weight_g"``/``"weight_v"``/``"bias"``
            or ``None`` (assign to the module's ``.data`` directly).
    """
    # Walk the attribute path down to the target submodule.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor of a fairseq HuBERT checkpoint into the HF model.

    Conv feature-extractor tensors are routed through ``load_conv_layer``;
    everything else is mapped via ``MAPPING`` and written with
    ``set_recursively``. Unmatched tensors are collected and logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Fine-tuned models wrap everything (except the LM head) under "hubert."
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder-layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one fairseq conv feature-extractor tensor into the HF feature extractor.

    The fairseq name encodes ``conv_layers.<layer_id>.<type_id>...``:
    type_id 0 is the conv itself, type_id 2 is the layer norm (only present on
    every layer without group norm, or on layer 0 with group norm).
    Tensors that match neither case are appended to ``unused_weights``.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq HuBERT checkpoint to the Transformers format.

    Args:
        checkpoint_path: path to the fairseq checkpoint file.
        pytorch_dump_folder_path: output directory for the converted model.
        config_path: optional HF ``config.json`` to load instead of defaults.
        dict_path: optional fairseq dictionary of a fine-tuned (CTC) model.
        is_finetuned: whether to build ``HubertForCTC`` (True) or ``HubertModel``.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            # Layer-norm feature extractors were trained with attention masks.
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = HubertForCTC(config)
    else:
        hf_wavavec = HubertModel(config)

    if is_finetuned:
        # fairseq needs the data dir of the dictionary to rebuild the task config.
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point for the conversion script.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
# --- boundary between concatenated snippets (stray "| NN" separator lines removed) ---
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence, start=None, end=None):
    """Sort ``sequence[start:end + 1]`` in place with the (deliberately inefficient)
    slowsort algorithm.

    Args:
        sequence: mutable sequence to sort in place.
        start: first index of the slice (defaults to 0).
        end: last index of the slice, inclusive (defaults to ``len(sequence) - 1``).
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    # Recursively sort both halves, bubble the maximum to `end`,
    # then sort everything but the last element again.
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod
    testmod()
# --- boundary between concatenated snippets (stray "| NN" separator lines removed) ---
'''simple docstring'''
def hamming_distance(string1, string2):
    """Return the Hamming distance between two equal-length strings.

    The Hamming distance is the number of positions at which the
    corresponding characters differ.

    Raises:
        ValueError: if the two strings do not have the same length.
    """
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!')
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
# --- boundary between concatenated snippets (stray "| NN" separator lines removed) ---
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase (unittest.TestCase ):
    # Integration test for the Flax MT5 conditional-generation model.

    @slow
    def test_small_integration_test(self):
        """Check that google/mt5-small reproduces a known reference LM score."""
        model = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there', return_tensors='np').input_ids
        labels = tokenizer('Hi I am', return_tensors='np').input_ids

        # Teacher-forcing: shift the labels right to build decoder inputs.
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        # Score is the (negative) total log-likelihood over the label tokens.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9_127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
# --- boundary between concatenated snippets (stray "| NN" separator lines removed) ---
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase : Union[str, Any] = get_logger(__name__)
class lowerCamelCase :
    """Mock download manager used to test dataset scripts against dummy data.

    Instead of downloading real data, URLs are resolved to files inside a
    ``dummy_data.zip`` archive that lives either next to the local dataset
    script or in the datasets GitHub repository.
    """

    # Name of the dummy-data folder inside the extracted archive.
    dummy_file_name = "dummy_data"
    # Root folder of the dataset scripts in the datasets repository checkout.
    datasets_scripts_dir = "datasets"
    # The mock manager never streams; kept to mirror the real manager's API.
    is_streaming = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks: Optional[List[Callable]] = None,
    ) -> None:
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        # Download (and cache) the dummy data lazily on first access.
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if it is a directory
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        # Extraction already happened in download_dummy_data; nothing to do.
        return path

    def get_recorded_sizes_checksums(self):
        # No real downloads happen, so there is nothing to record.
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                # Skip hidden/dunder files entirely.
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
# --- boundary between concatenated snippets (stray "| NN" separator lines removed) ---
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase (unittest.TestCase ):
    # Regression test: an accelerate-prepared optimizer must stay picklable.

    def test_accelerated_optimizer_pickling(self):
        """Prepare an SGD optimizer with Accelerator and round-trip it through pickle."""
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''')
        # Reset the global accelerator state so other tests start clean.
        AcceleratorState._reset_state()
# --- boundary between concatenated snippets (stray "| NN" separator lines removed) ---
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator():
    """Yield Fibonacci numbers indefinitely: 1, 2, 3, 5, 8, 13, ..."""
    a, b = 0, 1
    while True:
        # Advance one step and yield the new largest term.
        a, b = b, a + b
        yield b
def solution(n = 1_000):
    """Return the index of the first Fibonacci number to contain ``n`` digits.

    (Project Euler problem 25: for n=1000 the answer is 4782.)
    """
    answer = 1
    gen = fibonacci_generator()
    # The generator starts at F(2); +1 below converts the yield count
    # into a Fibonacci index.
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
    # Read the target digit count from stdin and print the answer.
    print(solution(int(str(input()).strip())))
# --- boundary between concatenated snippets (stray "| NN" separator lines removed) ---
'''simple docstring'''
# Type aliases for 3-D points and direction vectors (both 3-float tuples).
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]
def create_vector(end_pointa, end_pointa_2):
    """Return the displacement vector from ``end_pointa`` to ``end_pointa_2``."""
    x = end_pointa_2[0] - end_pointa[0]
    y = end_pointa_2[1] - end_pointa[1]
    z = end_pointa_2[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross(ab, ac):
    """Return the cross product of the 3-D vectors ``ab`` x ``ac``."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector, accuracy):
    """Return True if ``vector`` is (0, 0, 0) after rounding each component
    to ``accuracy`` decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def are_collinear(a, b, c, accuracy = 10):
    """Return True if the three 3-D points ``a``, ``b``, ``c`` lie on one line.

    Collinearity holds iff the cross product of AB and AC is the zero vector,
    tested up to ``accuracy`` decimal places.
    """
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
# --- boundary between concatenated snippets (stray "| NN" separator lines removed) ---
'''simple docstring'''
# Author attribution for the original implementation of this module.
__author__ = 'Tobias Carryer'

from time import time
class LinearCongruentialGenerator :
    """A pseudorandom number generator based on the linear congruential method.

    Produces the sequence ``seed = (multiplier * seed + increment) % modulo``.
    """

    def __init__( self , multiplier , increment , modulo , seed=int(time() ) ) -> None:  # noqa: B008
        """Store the LCG parameters.

        Note: the ``seed`` default is evaluated once at import time (B008 is
        deliberately silenced upstream) — pass an explicit seed for
        reproducible sequences.
        """
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number( self ):
        """Advance the generator state and return the next pseudorandom value."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
    # Show the LCG in action: print numbers until interrupted.
    lcg = LinearCongruentialGenerator(1_6_6_4_5_2_5, 1_0_1_3_9_0_4_2_2_3, 2 << 3_1)
    while True:
        print(lcg.next_number())
# --- boundary between concatenated snippets (stray "| NN" separator lines removed) ---
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
UpperCAmelCase : List[str] = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Save ``model`` into ``dirpath`` with ``save_pretrained``.

    If the directory already exists, any previous ``config.json`` /
    ``pytorch_model.bin`` files are removed first so the save is clean;
    otherwise the directory is created.
    """
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last dim.

    Args:
        p: tensor of probabilities (last dim sums to 1).
        unlogit: if True, square ``p`` first (used for attention scores).
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    # 0 * log(0) is NaN; define it as 0 for entropy purposes.
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2-D tensor as a tab-separated table (one row per layer).

    Integer tensors are printed with ``%d`` formatting, floats with 5 decimals.
    """
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute attention-head entropy and gradient-based importance scores.

    Runs the model over ``eval_dataloader``, backpropagating the LM loss into
    ``head_mask`` to obtain per-head importance (Michel et al., 2019).

    Returns:
        (attn_entropy, head_importance, total_loss)
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # Rank heads by descending importance (rank 0 = most important).
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least-important attention heads.

    Masks ``args.masking_amount`` of the heads per step while the score
    (1 / LM loss) stays above ``args.masking_threshold`` times the original
    score, following Michel et al. (http://arxiv.org/abs/1905.10650).

    Returns the final head mask (also saved to ``args.output_dir``).
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Physically remove the heads zeroed in ``head_mask`` and report speedup.

    Unlike masking, pruning actually deletes the head weights
    (Michel et al., http://arxiv.org/abs/1905.10650), then re-evaluates the
    score and timing and saves the pruned model to ``args.output_dir``.
    """
    # Measure score/time with masking only.
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    # Layer -> list of head indices to remove (heads where the mask is 0).
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapses single-head lists to a bare int; re-wrap it.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    # Measure score/time again on the actually-pruned model.
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    """Entry point: parse args, set up devices/model/data, compute head importance,
    and optionally mask and prune attention heads."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    # NOTE(review): class name kept as spelled in this file's imports — upstream
    # calls this GPT2LMHeadModel; confirm the import resolves.
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset (was np.intaa in the mangled source — int64 is the valid dtype)
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
# Script entry point.
# NOTE(review): no function named `main` is visible in this chunk (defs were
# renamed `_a` by an automated transform) — confirm the entry-point name.
if __name__ == "__main__":
    main()
| 47
| 0
|
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Return True when tensor protos *a* and *b* are equal ignoring their names.

    Temporarily blanks both ``name`` fields, compares the protos, then restores
    the original names before returning.
    """
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input of *node_proto* equal to *name* with *new_name*.

    Also recurses into the subgraphs of "If" (both branches) and "Loop" nodes.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # Repeated proto fields have no item assignment:
            # emulate it with insert-then-pop at the same position.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """Replace input *name* with *new_name* in every node of *graph_proto*."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Remove duplicate initializers from *model_without_ext* and rewire inputs.

    *ind_to_replace* contains ``(duplicate_index, reference_index)`` pairs into
    the initializer list; each duplicate is removed and every node input that
    referenced it is redirected to the kept (reference) initializer's name.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        # Pairs are sorted so the reference always precedes the duplicate.
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Deduplicate identical initializer tensors in an ONNX model file.

    Loads the model at *onnx_file_path*, finds pairwise-equal initializers
    (ignoring names), removes the duplicates while redirecting node inputs to
    the kept copy, and saves the result next to the input file with an
    ``optimized_`` prefix. Returns the path of the optimized model.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                # Bytes per element by ONNX TensorProto data type:
                # 1=FLOAT, 6=INT32 -> 4; 7=INT64, 11=DOUBLE -> 8.
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                # (duplicate_index, reference_index)
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1_024 / 1_024 / 1_024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    new_model = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)
    return new_model
| 711
|
'''simple docstring'''
def fibonacci(n: int) -> int:
    """Return the Fibonacci number at position *n* of the sequence [0, 1, 1, 2, ...].

    Non-integer input (and n == 1) returns 0, preserving the original guard.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with *n* decimal digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    """Project Euler 25: index of the first Fibonacci term with *n* digits."""
    return fibonacci_digits_index(n)
# Script entry point: read the digit count from stdin and print the index of
# the first Fibonacci number with that many digits.
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 47
| 0
|
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Decorator for forward-style methods that fires accelerate's ``pre_forward``
    hook (device placement for offloaded weights) before the method runs.

    Returns *method* unchanged when accelerate is unavailable or older than
    0.17.0 (the first version exposing the ``_hf_hook.pre_forward`` API).
    """
    if not is_accelerate_available():
        return method
    # Bug fixed: the mangled source compared version.parse(method) — it must
    # compare accelerate's own version string.
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
| 712
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Node of the doubly linked list used by the LRU cache; holds a key/value pair."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        # Neighbour links; set by the list when the node is inserted.
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )
class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with sentinel head/rear nodes.

    Real nodes live between the sentinels; the node just after ``head`` is the
    least recently used, the node just before ``rear`` the most recently used.
    """

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Insert *node* just before the rear sentinel (most-recently-used end)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Unlink *node* and return it, or return None if it is not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """Least-recently-used cache backed by a dict plus a doubly linked list."""

    # Maps each decorated function to its dedicated cache instance.
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the cached value for *key* (marking it most recent), else None."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Insert or update *key*; evict the least-recently-used entry when full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list assert node.key is not None
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator factory wrapping a single-argument function with an LRU cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
# Run the embedded doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 47
| 0
|
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """Simulated connection timeout surfaces the library's hang error, and a
    request with an explicit timeout raises ConnectTimeout."""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    """Simulated connection failure raises requests' ConnectionError."""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode():
    """With HF_DATASETS_OFFLINE=1, http_head must raise a ConnectionError subclass."""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 713
|
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Return True when tensor protos *a* and *b* are equal ignoring their names.

    Temporarily blanks both ``name`` fields, compares the protos, then restores
    the original names before returning.
    """
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input of *node_proto* equal to *name* with *new_name*.

    Also recurses into the subgraphs of "If" (both branches) and "Loop" nodes.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # Repeated proto fields have no item assignment:
            # emulate it with insert-then-pop at the same position.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """Replace input *name* with *new_name* in every node of *graph_proto*."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Remove duplicate initializers from *model_without_ext* and rewire inputs.

    *ind_to_replace* contains ``(duplicate_index, reference_index)`` pairs into
    the initializer list; each duplicate is removed and every node input that
    referenced it is redirected to the kept (reference) initializer's name.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        # Pairs are sorted so the reference always precedes the duplicate.
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Deduplicate identical initializer tensors in an ONNX model file.

    Loads the model at *onnx_file_path*, finds pairwise-equal initializers
    (ignoring names), removes the duplicates while redirecting node inputs to
    the kept copy, and saves the result next to the input file with an
    ``optimized_`` prefix. Returns the path of the optimized model.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                # Bytes per element by ONNX TensorProto data type:
                # 1=FLOAT, 6=INT32 -> 4; 7=INT64, 11=DOUBLE -> 8.
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                # (duplicate_index, reference_index)
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1_024 / 1_024 / 1_024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    new_model = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)
    return new_model
| 47
| 0
|
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
class LeViTImageProcessor(BaseImageProcessor):
    """Image processor: resize (shortest edge scaled by 256/224), center crop,
    rescale and normalize with the ImageNet default mean/std.

    NOTE(review): class/base/method names reconstructed from the ``self.resize``
    etc. call sites (the source had been machine-mangled); confirm the public
    class name against the module that imports it.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: dict = None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: dict = None,
        do_rescale: bool = True,
        rescale_factor=1 / 255,
        do_normalize: bool = True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: dict,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge becomes int(256/224 * shortest_edge), or
        to an exact (height, width)."""
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: dict,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop to size["height"] x size["width"]."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean,
        std,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel *mean* and *std*."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured resize/crop/rescale/normalize pipeline; every
        argument falls back to the instance default when None."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size_dict = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size_dict, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding: map submodule name -> public names it exports.
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exported when torch is installed.
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    """Image processor that resizes images down to the nearest multiple of
    ``size_divisor`` and optionally rescales pixel values to [0, 1].

    NOTE(review): class/base/method names reconstructed from the ``self.resize``
    etc. call sites (the source had been machine-mangled); confirm the public
    class name against the module that imports it.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so both height and width are multiples of *size_divisor*.

        Bug fixed: the mangled source dropped the (height, width) tuple unpack
        of ``get_image_size``, leaving both names undefined.
        """
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured resize/rescale pipeline; every argument falls
        back to the instance default when None."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 715
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
class lowerCamelCase(BaseImageProcessor):
    """
    GLPN image processor: rounds each image dimension down to a multiple of
    ``size_divisor`` and rescales pixel values into [0, 1].

    NOTE(review): this class was mangled by obfuscation — every signature used
    duplicate ``lowercase__`` parameters (a SyntaxError) and ``__init__``
    assigned throwaway locals instead of instance attributes. Parameter and
    method names below are reconstructed from the attribute reads in
    ``preprocess`` (``self.do_resize`` …) and its ``self.resize`` /
    ``self.rescale`` calls; the base class is the imported, otherwise unused
    ``BaseImageProcessor``.
    """

    # Restored from the mangled `_lowercase` attribute; names the tensors the
    # model consumes (standard image-processor convention).
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size_divisor=32,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        **kwargs,
    ) -> None:
        """
        Args:
            do_resize: Whether to round height/width down to a multiple of `size_divisor`.
            size_divisor: Divisor the output dimensions must be a multiple of.
            resample: Resampling filter used when resizing.
            do_rescale: Whether to rescale pixel values by 1/255 into [0, 1].
        """
        # `preprocess` falls back to these attributes when its own arguments
        # are None, so they must be stored on the instance.
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        """Resize `image`, rounding each dimension DOWN to the closest multiple of `size_divisor`."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # Bare `resize` is the module-level transform, not this method.
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        """Multiply `image` by `scale` (e.g. 1/255 to map uint8 pixels into [0, 1])."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the resize/rescale pipeline on one image or a batch.

        Per-call arguments override the processor defaults stored in __init__.
        Raises ValueError on invalid images or when resizing without a divisor.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image(s)')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 47
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
class lowerCamelCase(BaseImageProcessor):
    """
    Image processor implementing the standard resize → center-crop → rescale →
    normalize pipeline (shortest edge 256, crop 224, ImageNet-standard stats).

    NOTE(review): signatures in this class were mangled with duplicate
    ``lowercase__`` parameters (a SyntaxError) and ``__init__`` assigned
    throwaway locals instead of instance attributes. Names are reconstructed
    from the ``self.*`` reads in ``preprocess``; ``default_to_square=False``
    for the shortest-edge size is the library convention — confirm upstream.
    """

    # Restored from the mangled `_lowercase` attribute.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ) -> None:
        """
        Args:
            do_resize: Whether to resize the shorter edge to ``size["shortest_edge"]``.
            size: Size dict; defaults to ``{"shortest_edge": 256}``.
            resample: Resampling filter used when resizing.
            do_center_crop: Whether to center-crop to ``crop_size``.
            crop_size: Crop size dict; defaults to 224x224.
            do_rescale: Whether to multiply pixel values by ``rescale_factor``.
            rescale_factor: Scale applied when rescaling (default 1/255).
            do_normalize: Whether to normalize with ``image_mean``/``image_std``.
            image_mean: Per-channel mean; defaults to IMAGENET_STANDARD_MEAN.
            image_std: Per-channel std; defaults to IMAGENET_STANDARD_STD.
        """
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        """Resize so the shorter edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        """Center-crop `image` to ``size["height"]`` x ``size["width"]``."""
        size = get_size_dict(size)
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        """Multiply `image` by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize `image` channel-wise: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured pipeline to one image or a batch.

        Per-call arguments override the processor defaults; each enabled step
        validates that its required parameters are present.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 716
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    """Builds tiny LED configs/inputs for the TFLED test suite.

    NOTE(review): the class name was mangled; it is restored from the
    `TFLEDModelTester(self)` call in the sibling test class. `__init__` had
    duplicate mangled parameters (a SyntaxError) and dropped its `self.*`
    assignments; names are reconstructed from the attribute reads below and
    in the sibling classes.
    """

    config_cls = LEDConfig
    config_updates = {}
    # Presumably `hidden_act`; the mangled attribute is never read here — confirm upstream.
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        """Store the hyper-parameters the individual tests read back."""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        """Build a small LEDConfig and a matching encoder/decoder input dict."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        # Force every sequence to end with EOS.
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        # Mark only the final (EOS) token of each sequence as globally attended.
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check that cached decoding (`past_key_values`) matches uncached decoding."""
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Assemble the kwargs dict for a TFLED forward pass, deriving default masks.

    NOTE(review): the mangled signature duplicated one parameter name; the
    restored names come from the returned dict keys and the three call sites
    in this file.
    """
    if attention_mask is None:
        # Attend to every non-pad token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Always attend to the first decoder token; elsewhere skip pads.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class lowerCamelCase (a__ , a__ , unittest.TestCase ):
    # NOTE(review): identifiers in this class were mangled by obfuscation: the
    # base aliases `a__` are undefined, the test methods are all named
    # `UpperCAmelCase_` (later defs shadow earlier ones), and many statements
    # assign a throwaway `_snake_case` local where the original clearly wrote
    # into `inputs_dict`/`config` (e.g. the `global_attention_mask` and
    # `output_attentions` lines). The original targets are not fully
    # recoverable from this file alone, so the code is left byte-identical and
    # only documented.
    _lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    _lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    _lowercase : Dict = (
        {
            """conversational""": TFLEDForConditionalGeneration,
            """feature-extraction""": TFLEDModel,
            """summarization""": TFLEDForConditionalGeneration,
            """text2text-generation""": TFLEDForConditionalGeneration,
            """translation""": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _lowercase : int = True
    _lowercase : List[Any] = False
    _lowercase : str = False
    _lowercase : Union[str, Any] = False
    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """Create the model tester and config tester (presumably setUp — confirm)."""
        # NOTE(review): results should be stored on self.model_tester /
        # self.config_tester; the assignments were mangled to locals.
        _snake_case : str = TFLEDModelTester(self )
        _snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ )
    def UpperCAmelCase_ ( self ) -> Tuple:
        """Run the shared LEDConfig sanity checks."""
        self.config_tester.run_common_tests()
    def UpperCAmelCase_ ( self ) -> List[str]:
        """Exercise cached decoding against uncached decoding."""
        _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )
    def UpperCAmelCase_ ( self ) -> int:
        """Check encoder/decoder attention output shapes, incl. global attention."""
        _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        _snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] )
        _snake_case : Optional[Any] = 2
        # First `num_global_attn_indices` positions attend globally.
        _snake_case : Any = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
        _snake_case : Dict = True
        _snake_case : str = self.model_tester.seq_length
        _snake_case : Dict = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(lowercase__ ):
            # NOTE(review): the parameter was presumably `outputs`; the body
            # reads `outputs`/`decoder_attentions`, which the mangling broke.
            _snake_case : Optional[int] = outputs.decoder_attentions
            self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        def check_encoder_attentions_output(lowercase__ ):
            # NOTE(review): same mangling — `attentions` / `global_attentions`
            # were presumably the assignment targets.
            _snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
            _snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            # Request attentions but not hidden states via the inputs dict.
            _snake_case : Union[str, Any] = True
            _snake_case : Dict = False
            _snake_case : Union[str, Any] = False
            _snake_case : List[Any] = model_class(lowercase__ )
            _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
            _snake_case : List[Any] = len(lowercase__ )
            self.assertEqual(config.output_hidden_states , lowercase__ )
            check_encoder_attentions_output(lowercase__ )
            if self.is_encoder_decoder:
                _snake_case : Union[str, Any] = model_class(lowercase__ )
                _snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
                self.assertEqual(config.output_hidden_states , lowercase__ )
                check_decoder_attentions_output(lowercase__ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            _snake_case : str = True
            _snake_case : Tuple = model_class(lowercase__ )
            _snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
            self.assertEqual(config.output_hidden_states , lowercase__ )
            check_encoder_attentions_output(lowercase__ )
            # Check attention is always last and order is fine
            _snake_case : int = True
            _snake_case : List[str] = True
            _snake_case : Tuple = model_class(lowercase__ )
            _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) )
            self.assertEqual(model.config.output_hidden_states , lowercase__ )
            check_encoder_attentions_output(lowercase__ )
    @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
    def UpperCAmelCase_ ( self ) -> int:
        """Skipped: symbolic-tensor conditionals break tracing (see decorator)."""
        pass
    def UpperCAmelCase_ ( self ) -> str:
        """Intentionally empty override of a mixin test (mangled name — confirm)."""
        pass
def _long_tensor(tok_lst):
    """Build a constant TF integer tensor of token ids.

    NOTE(review): name restored from the integration-test call sites; the
    mangled dtype `tf.intaa` is presumably tf.int32 — confirm upstream.
    """
    return tf.constant(tok_lst, dtype=tf.int32)
# Numeric tolerance used by the slow integration tests below
# (the constant's name was mangled by obfuscation; presumably TOLERANCE).
UpperCAmelCase : Dict = 1E-4
@slow
@require_tf
class lowerCamelCase(unittest.TestCase):
    """Slow integration tests against the real `allenai/led-base-16384` checkpoint.

    NOTE(review): the two methods were both mangled to `UpperCAmelCase_`
    (shadowing each other) and their bodies referenced an undefined
    `lowercase__`; locals and unittest-style names are restored from the
    data flow (`_long_tensor` results feed `prepare_led_inputs_dict`).
    """

    def test_inference_no_head(self):
        """Base LED model: check output shape and a 3x3 logit slice."""
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384').led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        """LED with LM head: check vocab-sized output and a 3x3 logit slice."""
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384')
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 47
| 0
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter):
    """Count integer right triangles per perimeter (Project Euler 39).

    Returns a Counter mapping perimeter -> number of (base, perpendicular,
    hypotenuse) integer solutions with base <= perpendicular and
    perimeter <= max_perimeter.

    NOTE(review): both functions in this file were mangled to `_a` and the
    inner loop bound / integrality check were collapsed onto the parameter
    (the original always returned an empty Counter). Names are restored from
    the call sites (`pythagorean_triple` at the old L2501, `solution` in the
    __main__ guard).
    """
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        # perpendicular >= base so each triangle is counted exactly once.
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            # Keep only Pythagorean (integer-hypotenuse) pairs.
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter=1_000):
    """Return the perimeter <= max_perimeter with the most right-triangle solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
# Entry point: prints the perimeter (<= 1000) admitting the most right triangles.
if __name__ == "__main__":
    print(F"""Perimeter {solution()} has maximum solutions""")
| 717
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
# NOTE(review): these three module constants were all mangled to
# `UpperCAmelCase` (each shadowing the previous); names are restored from the
# references inside the tokenizer class below.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2_0_4_8,
}


class lowerCamelCase(PreTrainedTokenizerFast):
    """Fast (Rust-backed) byte-level BPE tokenizer for GPT-NeoX-20B.

    NOTE(review): `__init__` had duplicate mangled parameters (a SyntaxError);
    parameter names are reconstructed from the `super().__init__` keywords and
    body reads. The class attribute names follow the fast-tokenizer
    convention — confirm against upstream.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer if its serialized
        # `add_prefix_space` flag disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Serialize the tokenizer model files into `save_directory`; returns the paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation):
        """Flatten a `Conversation` into ids (EOS after each turn), left-truncated
        to `model_max_length`."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 47
| 0
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num, den):
    """Return True if num/den stays equal after naively cancelling num's last
    digit against den's first digit (e.g. 16/64 -> 1/4)."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len):
    """List the non-trivial digit-cancelling fractions 'num/den' with num < den,
    both of `digit_len` digits (Project Euler 33).

    NOTE(review): the three functions in this file were all mangled to `_a`
    with duplicate parameters and a collapsed `range` (the loop never ran);
    names and bounds are restored from the call sites and the body's reads.
    """
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            # Skip trivial cases: equal values and denominators ending in 0.
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f'{num}/{den}')
            den += 1
        # Restart the denominator scan for the next numerator.
        den = 10
    return solutions


def solution(max_digits=2):
    """Multiply the reciprocals of the digit-cancelling fractions; the result
    is the denominator of their product in lowest terms (expected: 100)."""
    result = 1.0
    for fraction in fraction_list(max_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
# Entry point: prints 100, the denominator of the product of the four
# non-trivial digit-cancelling fractions.
if __name__ == "__main__":
    print(solution())
| 718
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
if num <= 0:
raise ValueError('''math domain error''' )
return quad(lowerCAmelCase_ , 0 , lowerCAmelCase_ , args=(lowerCAmelCase_) )[0]
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
return math.pow(lowerCAmelCase_ , z - 1 ) * math.exp(-x )
# When run as a script, execute the module's doctests.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 47
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for `XLMTokenizer` using a tiny handcrafted BPE vocab.

    NOTE(review): all four methods were mangled to `UpperCAmelCase_` (later
    defs shadowed earlier ones, so only one survived) and the vocab/merges
    paths were assigned to throwaway locals; unittest/mixin names and the
    `self.vocab_file`/`self.merges_file` assignments are restored from the
    reads below and the imported, otherwise unused `TokenizerTesterMixin`.
    """

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))

    def get_input_output_texts(self, tokenizer):
        """Mixin hook: input/expected-output text pair for round-trip checks."""
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        """BPE splits 'lower' into 'low' + 'er</w>' with the toy vocab."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """Special tokens wrap single sequences as [0]+x+[1] and pairs as [0]+x+[1]+y+[1]."""
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 719
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
    @slow
    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """Round-trip a BERT LM-head checkpoint through the PT<->TF auto classes.

        NOTE(review): ``lowercase__`` is unbound (obfuscation artifact) — restore
        ``model_name`` / result variables before running.
        """
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
    @slow
    def UpperCAmelCase_ ( self ) -> Any:
        """Round-trip a BERT masked-LM checkpoint PT<->TF, also with ``output_loading_info``.

        NOTE(review): ``lowercase__`` is unbound (obfuscation artifact) — restore
        ``model_name`` / result variables before running.
        """
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
            _snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
            _snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
    @slow
    def UpperCAmelCase_ ( self ) -> List[str]:
        """Round-trip a T5 seq2seq-LM checkpoint PT<->TF, also with ``output_loading_info``.

        NOTE(review): ``lowercase__`` is unbound (obfuscation artifact) — restore
        ``model_name`` / result variables before running.
        """
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
            _snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
            _snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
    @slow
    def UpperCAmelCase_ ( self ) -> Dict:
        """Round-trip a BERT sequence-classification checkpoint PT<->TF.

        NOTE(review): ``lowercase__`` is unbound (obfuscation artifact) — restore
        ``model_name`` / result variables before running.
        """
        for model_name in ["bert-base-uncased"]:
            _snake_case : Any = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
    @slow
    def UpperCAmelCase_ ( self ) -> Optional[int]:
        """Round-trip a BERT question-answering checkpoint PT<->TF.

        NOTE(review): ``lowercase__`` is unbound (obfuscation artifact) — restore
        ``model_name`` / result variables before running.
        """
        for model_name in ["bert-base-uncased"]:
            _snake_case : str = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
    def UpperCAmelCase_ ( self ) -> str:
        """Check the tiny-model parameter count (14 410) survives PT<->TF conversion.

        NOTE(review): ``lowercase__`` is unbound and ``model`` is never assigned
        (obfuscation artifact) — restore the checkpoint id and result variable.
        """
        _snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
        _snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
    def UpperCAmelCase_ ( self ) -> str:
        """Second tiny-model parameter-count check (different checkpoint in the original).

        NOTE(review): ``lowercase__`` is unbound and ``model`` is never assigned
        (obfuscation artifact) — restore the checkpoint id and result variable.
        """
        _snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
        _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
| 47
| 0
|
'''simple docstring'''
import os
# Roman-numeral symbol values. The functions below read this as ``SYMBOLS``,
# but the obfuscated file bound it to ``UpperCAmelCase`` — restore the name.
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def _a ( lowerCAmelCase_ ):
    """Parse a Roman-numeral string into its integer value.

    Uses the standard subtractive rule: a symbol smaller than its successor
    is subtracted, otherwise added. Restores the original locals
    (``index``/``total_value``/…) that the obfuscated body left unbound.
    """
    numerals = lowerCAmelCase_
    if not numerals:  # be safe on empty input instead of raising IndexError
        return 0
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    # last symbol is always added
    total_value += SYMBOLS[numerals[index]]
    return total_value


# Callers below use the original (pre-obfuscation) name.
parse_roman_numerals = _a
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Union[str, Any] = ''''''
_snake_case : Any = num // 1_000
numerals += m_count * "M"
num %= 1_000
_snake_case : Any = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
_snake_case : int = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def _a ( lowerCAmelCase_ = "/p089_roman.txt" ):
    """Project Euler 089: total characters saved by rewriting each Roman
    numeral in the data file in its minimal form.

    The obfuscated body referenced the undefined ``roman_numerals_filename``;
    the original resolves the data file relative to this script via
    ``__file__`` and the filename parameter.
    """
    savings = 0
    with open(os.path.dirname(__file__) + lowerCAmelCase_) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        value = parse_roman_numerals(original)
        shorter = generate_roman_numerals(value)
        savings += len(original) - len(shorter)
    return savings


# The __main__ guard below calls the original (pre-obfuscation) name.
solution = _a
# Script entry point: print the Project Euler 089 answer.
# NOTE(review): relies on ``solution`` being bound above; the obfuscated defs
# are all named ``_a``, so confirm an alias exists.
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 720
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import plumbing for the timm-backbone model. The obfuscated version
# bound the structure dict to ``UpperCAmelCase`` while ``_LazyModule`` below
# read the undefined ``_import_structure`` — restore the canonical layout.
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration.
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]

if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47
| 0
|
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _a ( lowerCAmelCase_ ):
    """Return True if the code point is a CJK ideograph (incl. extensions A–F
    and the compatibility blocks).

    The obfuscated body read the unbound name ``cp``; bind it to the parameter.
    """
    cp = lowerCAmelCase_
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x2_0000 and cp <= 0x2_A6DF)  #
        or (cp >= 0x2_A700 and cp <= 0x2_B73F)  #
        or (cp >= 0x2_B740 and cp <= 0x2_B81F)  #
        or (cp >= 0x2_B820 and cp <= 0x2_CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2_F800 and cp <= 0x2_FA1F)  #
    ):  #
        return True
    return False


# Callers in this file use the original (pre-obfuscation) name.
_is_chinese_char = _a
def _a ( lowerCAmelCase_ ):
    """Return 1 if every character of the word is a CJK ideograph, else 0.

    The obfuscated body iterated the unbound ``word`` and never reused the
    computed code point; restore both.
    """
    word = lowerCAmelCase_
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


# Callers in this file use the original (pre-obfuscation) name.
is_chinese = _a
def _a ( lowerCAmelCase_ ):
    """Collect the multi-character all-Chinese tokens from *tokens* as a list.

    Restores the original locals (``word_set``/``token``) that the obfuscated
    body left unbound or shadowed.
    """
    tokens = lowerCAmelCase_
    word_set = set()
    for token in tokens:
        # only whole words (length > 1) made entirely of CJK chars qualify
        if len(token) > 1 and is_chinese(token):
            word_set.add(token)
    return list(word_set)


# Callers in this file use the original (pre-obfuscation) name.
get_chinese_word = _a
def _a ( bert_tokens , chinese_word_set ):
    """Prefix WordPiece continuation tokens with ``##`` for every whole word
    found in *chinese_word_set* (whole-word-masking preparation).

    The obfuscated signature had two parameters with the same name (a
    SyntaxError) and the body read unbound ``start``/``end``/``bert_word``;
    restore the original names.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # try the longest possible whole word first
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


# Callers in this file use the original (pre-obfuscation) name.
add_sub_symbol = _a
def _a ( lines , ltp_tokenizer , bert_tokenizer ):
    """For each input line, compute the positions of ``##``-continuation
    tokens that belong to whole Chinese words (whole-word-masking refs).

    Restores the original parameter and local names: the obfuscated signature
    repeated one parameter name (SyntaxError) and the body read unbound names.
    """
    # LTP word segmentation, batched by 100 lines to bound memory.
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    # BERT tokenization, same batching.
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


# Callers in this file use the original (pre-obfuscation) name.
prepare_ref = _a
def _a ( lowerCAmelCase_ ):
    """Read the input corpus, compute whole-word-masking refs, and write them
    to ``args.save_path`` as one JSON list per line.

    The obfuscated body read the unbound ``args``/``data`` names; bind the
    parameter and restore the locals.
    """
    args = lowerCAmelCase_
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    # avoid delimiter like '\u2029'
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


# The __main__ block below calls the original (pre-obfuscation) name.
main = _a
if __name__ == "__main__":
    # The obfuscated version bound the parser to ``UpperCAmelCase`` while the
    # add_argument/parse_args calls read the undefined ``parser``/``args``.
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
| 721
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

# Mode name -> Auto model class. The class below indexes this as
# ``MODEL_MODES``; the obfuscated file bound it to ``UpperCAmelCase``.
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeqaSeqLM,
    "translation": AutoModelForSeqaSeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class lowerCamelCase (pl.LightningModule ):
    """Generic PyTorch-Lightning wrapper around a Hugging Face transformer.

    Holds the config/tokenizer/model triple, builds optimizer + LR schedule
    from ``hparams``, and leaves task-specific data loading to subclasses via
    :meth:`get_dataloader`. The obfuscated version had duplicate parameter
    names (SyntaxError), never assigned the instance attributes it later
    read, and called its own methods by their original names — all restored.
    """

    def __init__(
        self,
        hparams,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Load config/tokenizer/model from ``hparams.model_name_or_path``
        unless explicit instances are supplied."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        # Optional dropout/layerdrop overrides coming from the CLI.
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        """Reload ``self.model`` from a Hugging Face checkpoint."""
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        """Build the step-wise LR scheduler selected by ``hparams.lr_scheduler``."""
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer (AdamW or Adafactor) and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        """Record dataset size (and build the train loader outside of test stage)."""
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        """Cache-file path for preprocessed features of the given split."""
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint) -> None:
        """Save the best model/tokenizer next to the Lightning checkpoint."""
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class lowerCamelCase (pl.Callback ):
    """Callback that initializes the RAG retriever on the master worker only.

    The obfuscated method signature repeated a parameter name (SyntaxError);
    the body's own use-sites show the originals were ``trainer``/``pl_module``.
    """

    def UpperCAmelCase_(self, trainer, pl_module):
        # NOTE(review): the original Lightning hook name was lost in
        # obfuscation; as ``UpperCAmelCase_`` Lightning never invokes it — TODO restore.
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


# ``generic_train`` below instantiates this class as ``InitCallback``.
InitCallback = lowerCamelCase
class lowerCamelCase (pl.Callback ):
    """Debug callback: print the name of every RAG parameter whose gradient is None.

    The obfuscated method signature repeated a parameter name (SyntaxError);
    the body shows the originals were ``trainer``/``pl_module``, and the loop
    printed the parameter *name*, not the unbound ``lowercase__``.
    """

    def UpperCAmelCase_(self, trainer, pl_module):
        # NOTE(review): original Lightning hook name lost in obfuscation — TODO restore.
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class lowerCamelCase (pl.Callback ):
    """Logs learning rates per group and validation/test metrics.

    The obfuscated version gave all three hooks the same name with duplicate
    parameters (SyntaxError) and read unbound locals; hook and local names are
    restored from their use-sites and log strings.
    """

    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))


# ``generic_train`` below instantiates this class as ``LoggingCallback``.
LoggingCallback = lowerCamelCase
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
parser.add_argument(
'''--output_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=lowerCAmelCase_ , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase_ )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase_ , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCAmelCase_ , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def _a (
    model,
    args,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    """Build a ``pl.Trainer`` for *model* from *args* and run training.

    The obfuscated signature repeated parameter names (SyntaxError) and the
    body read unbound names; the parameter order is reconstructed from the
    default values and the body's use-sites.
    """
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # Work on a copy so the shared mutable default ``extra_callbacks=[]``
    # is never mutated across calls.
    extra_callbacks = list(extra_callbacks)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    # NOTE(review): the obfuscated body set two further train_params entries to
    # ``None`` and ``'auto'``; ``profiler``/``devices`` match the upstream test
    # script — confirm against the original.
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer


# Restore the original (pre-obfuscation) public name.
generic_train = _a
| 47
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
# NOTE(review): obfuscation collapsed the logger and the pretrained-config map
# onto the same name ``a_`` — the logger binding is immediately shadowed below.
a_ = logging.get_logger(__name__)

# Pretrained checkpoint id -> hosted config URL.
a_ = {
    '''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : List[str] = """wavlm"""
def __init__(self , lowercase__=32 , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowercase__=(5, 2, 2, 2, 2, 2, 2) , lowercase__=(10, 3, 3, 3, 3, 2, 2) , lowercase__=False , lowercase__=1_28 , lowercase__=16 , lowercase__=3_20 , lowercase__=8_00 , lowercase__=False , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=3_20 , lowercase__=2 , lowercase__=0.1 , lowercase__=1_00 , lowercase__=2_56 , lowercase__=2_56 , lowercase__=0.1 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_56 , lowercase__=(5_12, 5_12, 5_12, 5_12, 15_00) , lowercase__=(5, 3, 3, 1, 1) , lowercase__=(1, 2, 3, 1, 1) , lowercase__=5_12 , lowercase__=80 , lowercase__=0 , lowercase__=1 , lowercase__=2 , lowercase__=False , lowercase__=3 , lowercase__=2 , lowercase__=3 , lowercase__=None , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ )
snake_case_ : str = hidden_size
snake_case_ : List[str] = feat_extract_norm
snake_case_ : Tuple = feat_extract_activation
snake_case_ : Any = list(lowercase__ )
snake_case_ : Dict = list(lowercase__ )
snake_case_ : Dict = list(lowercase__ )
snake_case_ : Tuple = conv_bias
snake_case_ : Optional[int] = num_buckets
snake_case_ : List[Any] = max_bucket_distance
snake_case_ : Union[str, Any] = num_conv_pos_embeddings
snake_case_ : int = num_conv_pos_embedding_groups
snake_case_ : List[Any] = len(self.conv_dim )
snake_case_ : Optional[int] = num_hidden_layers
snake_case_ : Any = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Dict = num_attention_heads
snake_case_ : str = hidden_dropout
snake_case_ : Optional[int] = attention_dropout
snake_case_ : Union[str, Any] = activation_dropout
snake_case_ : Dict = feat_proj_dropout
snake_case_ : List[str] = final_dropout
snake_case_ : Union[str, Any] = layerdrop
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : Any = num_ctc_classes
snake_case_ : str = vocab_size
snake_case_ : Any = do_stable_layer_norm
snake_case_ : Optional[int] = use_weighted_layer_sum
snake_case_ : Optional[int] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ : Optional[Any] = apply_spec_augment
snake_case_ : Tuple = mask_time_prob
snake_case_ : Optional[Any] = mask_time_length
snake_case_ : List[Any] = mask_time_min_masks
snake_case_ : Optional[Any] = mask_feature_prob
snake_case_ : Optional[int] = mask_feature_length
# parameters for pretraining with codevector quantized representations
snake_case_ : int = num_codevectors_per_group
snake_case_ : Optional[Any] = num_codevector_groups
snake_case_ : List[Any] = contrastive_logits_temperature
snake_case_ : str = num_negatives
snake_case_ : List[str] = codevector_dim
snake_case_ : Optional[Any] = proj_codevector_dim
snake_case_ : List[str] = diversity_loss_weight
# ctc loss
snake_case_ : List[Any] = ctc_loss_reduction
snake_case_ : Dict = ctc_zero_infinity
# adapter
snake_case_ : Any = add_adapter
snake_case_ : Union[str, Any] = adapter_kernel_size
snake_case_ : str = adapter_stride
snake_case_ : str = num_adapter_layers
snake_case_ : Optional[int] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case_ : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case_ : str = list(lowercase__ )
snake_case_ : List[str] = list(lowercase__ )
snake_case_ : str = list(lowercase__ )
snake_case_ : str = xvector_output_dim
@property
def __UpperCamelCase(self):
    """Overall stride of the convolutional feature encoder: the product of
    the per-layer strides in ``self.conv_stride``."""
    total_stride = 1
    for layer_stride in self.conv_stride:
        total_stride *= layer_stride
    return total_stride
| 48
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# Module logger.
a_ = logging.get_logger(__name__)

# Map from released DPR checkpoint names on the Hub to their config.json URLs.
# NOTE(review): both bindings use the mangled name ``a_`` (upstream convention
# would be ``logger`` and ``DPR_PRETRAINED_CONFIG_ARCHIVE_MAP``), so the dict
# below silently overwrites the logger binding above.
a_ = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-ctx_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
    ),
}
class __lowercase(_UpperCAmelCase):
    """Configuration for DPR (Dense Passage Retrieval) encoders/readers.

    A BERT-style configuration plus ``projection_dim`` for an optional output
    projection layer.

    BUG FIX: the original ``__init__`` signature reused one mangled name for
    every parameter (a SyntaxError — duplicate argument names) and the body
    bound each value to a throwaway local instead of an instance attribute.
    Parameter and attribute names are restored below; the call signature is
    positionally compatible with the original.
    """

    _A : str = """dpr"""

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Dimension of the optional projection over the pooled output;
        # presumably 0 means "no projection" — per upstream DPR configs.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 48
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class __lowercase :
    """Model tester for TF-DistilBert: builds a tiny config plus random inputs
    and shape-checks every task head.

    NOTE(review): identifiers were auto-mangled — every ``self.<attr> = ...``
    became a throwaway local ``snake_case_`` and parameters became
    ``lowercase__`` — so, as written, ``__init__`` never populates the
    ``self.batch_size`` / ``self.vocab_size`` / ... attributes the other
    methods read.  Code left byte-identical pending restoration.
    """

    def __init__(self , lowercase__ , ):
        # Intended: store the parent test case and the tiny hyper-parameters
        # (batch_size=13, seq_length=7, vocab_size=99, hidden_size=32, ...).
        snake_case_ : Tuple = parent
        snake_case_ : int = 13
        snake_case_ : Dict = 7
        snake_case_ : int = True
        snake_case_ : Union[str, Any] = True
        snake_case_ : int = False
        snake_case_ : Any = True
        snake_case_ : Any = 99
        snake_case_ : Any = 32
        snake_case_ : Any = 2
        snake_case_ : Any = 4
        snake_case_ : str = 37
        snake_case_ : Optional[Any] = """gelu"""
        snake_case_ : Any = 0.1
        snake_case_ : Union[str, Any] = 0.1
        snake_case_ : Optional[int] = 5_12
        snake_case_ : Any = 16
        snake_case_ : Optional[int] = 2
        snake_case_ : Optional[int] = 0.02
        snake_case_ : List[str] = 3
        snake_case_ : List[str] = 4
        snake_case_ : List[str] = None

    def __UpperCamelCase (self ):
        # Build random input ids, optional attention mask, optional labels,
        # and a small DistilBertConfig.
        snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case_ : Any = None
        if self.use_input_mask:
            snake_case_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case_ : Union[str, Any] = None
        snake_case_ : str = None
        snake_case_ : Optional[Any] = None
        if self.use_labels:
            snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
        snake_case_ : int = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        # Shape check for the bare TFDistilBertModel (dict and list inputs).
        snake_case_ : List[str] = TFDistilBertModel(config=lowercase__ )
        snake_case_ : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        snake_case_ : List[Any] = model(lowercase__ )
        snake_case_ : Optional[Any] = [input_ids, input_mask]
        snake_case_ : Dict = model(lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        # Shape check for the masked-LM head: logits over the vocabulary.
        snake_case_ : str = TFDistilBertForMaskedLM(config=lowercase__ )
        snake_case_ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        snake_case_ : Optional[Any] = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        # Shape check for the QA head: per-token start/end logits.
        snake_case_ : Any = TFDistilBertForQuestionAnswering(config=lowercase__ )
        snake_case_ : Union[str, Any] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
        }
        snake_case_ : int = model(lowercase__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        # Shape check for sequence classification: one logit row per example.
        snake_case_ : Optional[Any] = self.num_labels
        snake_case_ : Any = TFDistilBertForSequenceClassification(lowercase__ )
        snake_case_ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        snake_case_ : Optional[int] = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        # Shape check for multiple choice: inputs are tiled per choice.
        snake_case_ : int = self.num_choices
        snake_case_ : Union[str, Any] = TFDistilBertForMultipleChoice(lowercase__ )
        snake_case_ : List[str] = tf.tile(tf.expand_dims(lowercase__ , 1 ) , (1, self.num_choices, 1) )
        snake_case_ : Any = tf.tile(tf.expand_dims(lowercase__ , 1 ) , (1, self.num_choices, 1) )
        snake_case_ : List[str] = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
        }
        snake_case_ : Union[str, Any] = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
        # Shape check for token classification: per-token label logits.
        snake_case_ : Union[str, Any] = self.num_labels
        snake_case_ : Any = TFDistilBertForTokenClassification(lowercase__ )
        snake_case_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        snake_case_ : Optional[Any] = model(lowercase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __UpperCamelCase (self ):
        # Repack config+inputs into the (config, inputs_dict) pair expected by
        # the common test mixin.
        # NOTE(review): the annotated tuple-unpack below is also an artifact of
        # the mangling (annotations are not valid on tuple targets).
        snake_case_ : Optional[Any] = self.prepare_config_and_inputs()
        ((snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_)) : Dict = config_and_inputs
        snake_case_ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
    """Test harness wiring the TF-DistilBert model family into the shared
    model-tester and pipeline-tester mixins.

    NOTE(review): names were auto-mangled — ``TFDistilBertModelTester`` in the
    first method refers to the helper class above (itself mangled to
    ``__lowercase``), and the ``create_and_check_*`` calls target methods whose
    names were also mangled away.  Code left byte-identical pending
    restoration.
    """
    # Models under test (upstream: all_model_classes).
    _A : List[str] = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    # Pipeline-task -> model mapping (upstream: pipeline_model_mapping).
    _A : Dict = (
        {
            """feature-extraction""": TFDistilBertModel,
            """fill-mask""": TFDistilBertForMaskedLM,
            """question-answering""": TFDistilBertForQuestionAnswering,
            """text-classification""": TFDistilBertForSequenceClassification,
            """token-classification""": TFDistilBertForTokenClassification,
            """zero-shot""": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    _A : Tuple = False
    _A : int = False

    def __UpperCamelCase (self ):
        # setUp: create the model tester and a ConfigTester over DistilBertConfig.
        snake_case_ : Any = TFDistilBertModelTester(self )
        snake_case_ : Tuple = ConfigTester(self , config_class=lowercase__ , dim=37 )

    def __UpperCamelCase (self ):
        self.config_tester.run_common_tests()

    def __UpperCamelCase (self ):
        snake_case_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*lowercase__ )

    def __UpperCamelCase (self ):
        snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase__ )

    def __UpperCamelCase (self ):
        snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase__ )

    def __UpperCamelCase (self ):
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase__ )

    def __UpperCamelCase (self ):
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase__ )

    def __UpperCamelCase (self ):
        snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase__ )

    @slow
    def __UpperCamelCase (self ):
        # Smoke-test loading the first pretrained checkpoint in the archive list.
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            snake_case_ : List[Any] = TFDistilBertModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
@require_tf
class __lowercase ( unittest.TestCase):
    """Slow integration test: run ``distilbert-base-uncased`` on a fixed
    6-token input and compare the first 3x3 slice of the hidden states against
    reference values.

    NOTE(review): bindings are mangled to ``snake_case_`` while later lines
    read the upstream names (``model``, ``input_ids``, ``output``,
    ``expected_shape``, ``expected_slice``); left byte-identical.
    """
    @slow
    def __UpperCamelCase (self ):
        snake_case_ : str = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        snake_case_ : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]] )
        snake_case_ : Union[str, Any] = model(lowercase__ )[0]
        snake_case_ : int = [1, 6, 7_68]
        self.assertEqual(output.shape , lowercase__ )
        snake_case_ : Optional[int] = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ] )
        # Loose tolerance: pretrained weights on varying hardware.
        tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1e-4 )
| 48
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Tokenizer: split on any character outside [A-Za-z_0-9].
# BUG FIX: the original bound all three constants to the single mangled name
# ``a_`` while the rest of this module reads NON_ALPHA / MIN_NUM_TOKENS /
# NUM_PERM; the real names are restored here.
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# Parameters used in DuplicationIndex.
MIN_NUM_TOKENS = 10  # documents with fewer tokens are skipped by MinHashing
NUM_PERM = 256  # number of MinHash permutations
a_ = NUM_PERM  # preserve the previous (last-wins) binding of the mangled name
def get_min_hash(tokens):
    """Compute the MinHash signature of a list of tokens.

    Returns None for documents with fewer than MIN_NUM_TOKENS tokens, which
    callers treat as "skip this document".
    BUG FIX: the original passed its (mangled) input as ``num_perm``; the
    permutation count must be the module constant NUM_PERM.
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    # Hash the *set* of tokens: duplicates do not affect the signature.
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


# Preserve the previous (auto-mangled) binding for any existing callers.
SCREAMING_SNAKE_CASE__ = get_min_hash
def get_tokens(code):
    """Split *code* on non-alphanumeric characters (NON_ALPHA) and return the
    set of non-empty tokens.

    BUG FIX: the function name was mangled; call sites in this module
    reference ``get_tokens``, restored here.
    """
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


# Preserve the previous (auto-mangled) binding for any existing callers.
SCREAMING_SNAKE_CASE__ = get_tokens
class __lowercase :
    """Index of near-duplicate documents built on MinHash LSH.

    ``add`` inserts one (key, MinHash) pair and attaches the key to an existing
    duplicate cluster when the LSH query finds close matches;
    ``get_duplicate_clusters`` flattens the clusters into lists of
    {base_index, repo_name, path} dicts.

    NOTE(review): the constructor's assignments were auto-mangled into locals
    (``snake_case_``), so the ``self._index`` / ``self._duplicate_clusters`` /
    ``self._duplication_jaccard_threshold`` / ``self._num_perm`` attributes the
    methods read are never actually set as written; similarly ``add`` reads
    ``code_key``/``close_duplicates`` where its parameters are mangled.  Code
    left byte-identical pending restoration.
    """

    def __init__(self , *,
    lowercase__ = 0.85 , ):
        # Intended: store the Jaccard threshold (default 0.85), the MinHash
        # permutation count, the LSH index, and a defaultdict of clusters.
        snake_case_ : Tuple = duplication_jaccard_threshold
        snake_case_ : Optional[Any] = NUM_PERM
        snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        snake_case_ : List[Any] = defaultdict(lowercase__ )

    def __UpperCamelCase (self , lowercase__ , lowercase__ ):
        """Insert one (key, MinHash) pair into the index, recording any
        near-duplicates found by the LSH query."""
        snake_case_ : int = self._index.query(lowercase__ )
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}' )
            return
        self._index.insert(lowercase__ , lowercase__ )
        if len(lowercase__ ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(lowercase__ )
                    break
            else:
                # No queried duplicate is a cluster base yet: start a new
                # cluster keyed by the first close duplicate.
                self._duplicate_clusters[close_duplicates[0]].add(lowercase__ )

    def __UpperCamelCase (self ):
        """Return clusters as lists of {base_index, repo_name, path} dicts,
        each cluster listing its base element first."""
        snake_case_ : str = []
        for base, duplicates in self._duplicate_clusters.items():
            snake_case_ : Optional[Any] = [base] + list(lowercase__ )
            # reformat the cluster to be a list of dict
            snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(lowercase__ )
        return duplicate_clusters

    def __UpperCamelCase (self , lowercase__ ):
        """Dump the duplicate clusters as JSON to the given path."""
        snake_case_ : int = self.get_duplicate_clusters()
        with open(lowercase__ , """w""" ) as f:
            json.dump(lowercase__ , lowercase__ )
def _compute_min_hash(element):
    """Worker: compute the MinHash of one (index, row) pair.

    Returns ((index, repo_name, path), min_hash), or None (implicitly) when
    the document is too short for a signature.
    BUG FIX: the original mangled the tuple unpack into two throwaway locals
    and then read the undefined names ``index``/``data``; the pool call site
    also references ``_compute_min_hash``, restored here.
    """
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


# Preserve the previous (auto-mangled) binding for any existing callers.
SCREAMING_SNAKE_CASE__ = _compute_min_hash
def minhash_iter(dataset_iterator):
    """Yield ((index, repo_name, path), MinHash) pairs for a dataset iterator,
    computing signatures in a process pool and skipping too-short documents.

    BUG FIX: the function name was mangled; the tqdm loop below calls
    ``minhash_iter``, restored here.
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=1_0_0_0_0),
            chunksize=1_0_0,
        ):
            # _compute_min_hash returns None for documents below MIN_NUM_TOKENS.
            if data is not None:
                yield data


# Preserve the previous (auto-mangled) binding for any existing callers.
SCREAMING_SNAKE_CASE__ = minhash_iter
def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    """Find clusters of near-duplicate files in the dataset via MinHash LSH.

    BUG FIX: the original mangled the function name (the caller below uses
    ``make_duplicate_clusters``), both parameters into one name, and the
    DuplicationIndex binding into a throwaway local while calling ``di.add``.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=1_00)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


# Preserve the previous (auto-mangled) binding for any existing callers.
SCREAMING_SNAKE_CASE__ = make_duplicate_clusters
def jaccard_similarity(code1, code2):
    """Jaccard similarity of the token sets of two code strings.

    BUG FIX: the original signature reused one mangled name for both
    parameters (a SyntaxError) and the body compared a token set against
    itself; the caller also references ``jaccard_similarity``, restored here.
    """
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


# Preserve the previous (auto-mangled) binding for any existing callers.
SCREAMING_SNAKE_CASE__ = jaccard_similarity
a_ = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce one duplicate cluster to its 'extremes': a minimal set of
    elements such that every cluster member is within *jaccard_threshold* of
    some extreme.  Each extreme's ``copies`` counts the members it absorbed.

    Reads rows from the module-level ``_shared_dataset`` set by
    ``find_extremes``.
    BUG FIX: the original signature reused one mangled parameter name (a
    SyntaxError) and the body mixed up which element's ``copies`` to bump;
    the pool call site also references this restored name.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["""base_index"""]]["""content"""]
        for element2 in extremes:
            code2 = _shared_dataset[element2["""base_index"""]]["""content"""]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                # element1 is close enough to an existing extreme: absorb it.
                element2["copies"] += 1
                break
        else:
            # element1 is a new extreme, counting itself.
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


# Preserve the previous (auto-mangled) binding for any existing callers.
SCREAMING_SNAKE_CASE__ = _find_cluster_extremes_shared
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Compute the extremes of every duplicate cluster in parallel.

    Publishes *dataset* through the module-level ``_shared_dataset`` global so
    forked pool workers can read rows cheaply.
    BUG FIX: the original bound ``dataset``/the result list/the partial to
    throwaway locals and reused one mangled name for all three parameters;
    the caller references ``find_extremes``, restored here.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


# Preserve the previous (auto-mangled) binding for any existing callers.
SCREAMING_SNAKE_CASE__ = find_extremes
def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    """Deduplicate *dataset*: cluster near-duplicates with MinHash LSH, keep
    only each cluster's 'extremes', and return the filtered dataset plus the
    annotated clusters.

    BUG FIX: the original bound every intermediate to throwaway locals while
    later lines read the real names, and every f-string printed the length of
    the same mangled object; names and messages restored per the visible
    data-flow (clusters -> indices -> extremes -> filter).
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    # Drop every duplicate that is not an extreme.
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(f'Original dataset size: {len(dataset)}' )
    print(f'Number of duplicate clusters: {len(duplicate_clusters)}' )
    print(f'Files in duplicate cluster: {len(duplicate_indices)}' )
    print(f'Unique files in duplicate cluster: {len(extreme_dict)}' )
    print(f'Filtered dataset size: {len(ds_filter)}' )
    return ds_filter, duplicate_clusters


# Preserve the previous (auto-mangled) binding for any existing callers.
SCREAMING_SNAKE_CASE__ = deduplicate_dataset
| 48
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
# Module logger for the OwlViT feature-extractor deprecation shim.
a_ = logging.get_logger(__name__)
class __lowercase(_UpperCAmelCase):
    """Deprecated alias kept for backwards compatibility: warns and defers
    everything to the parent image-processor class.

    BUG FIX: the original ``__init__`` reused one mangled name for both ``*``
    and ``**`` parameters (a SyntaxError) and passed that undefined name as
    the warning category; per the deprecation message, the category is
    ``FutureWarning``.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use OwlViTImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 48
|
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
# Configure root logging once at import time: timestamped INFO-level messages.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# NOTE(review): the mangled name ``a_`` (upstream: ``logger``) is rebound
# repeatedly in this file; the __main__ block below reads ``logger``.
a_ = logging.getLogger(__name__)
if __name__ == "__main__":
    # Count per-token-id occurrences over a binarized MLM dataset and dump a
    # dense count vector (for smoothing masking probabilities).
    # BUG FIX: the original mangled every binding to ``a_`` while still
    # referencing ``parser``/``args``/``counts``, so the script could not run;
    # real names restored below.
    parser = argparse.ArgumentParser(
        description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
    )
    parser.add_argument(
        '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
    )
    parser.add_argument(
        '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
    )
    parser.add_argument('''--vocab_size''', default=30522, type=int)
    args = parser.parse_args()

    logger.info(F'''Loading data from {args.data_file}''')
    with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)

    logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Dense count vector indexed by vocabulary id; ids never seen stay 0.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(F'''Dump to {args.token_counts_dump}''')
    with open(args.token_counts_dump, '''wb''') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 48
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make torch ops deterministic so the image-slice assertions below are stable.
enable_full_determinism()
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
    """Fast pipeline tests for KandinskyInpaintPipeline built from tiny
    randomly-initialized components.

    NOTE(review): identifiers were auto-mangled — the ``@property`` helpers
    all share the name ``__UpperCamelCase`` and many assignments bind a
    throwaway local ``snake_case_`` while the next line reads the intended
    name (``tokenizer``, ``model``, ``components`` ...).  Code left
    byte-identical pending restoration.
    """
    # Pipeline under test plus its required / optional call parameters.
    _A : List[str] = KandinskyInpaintPipeline
    _A : List[str] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
    _A : List[Any] = [
        """prompt""",
        """negative_prompt""",
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
        """mask_image""",
    ]
    _A : Union[str, Any] = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """negative_prompt""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    _A : Tuple = False

    # Tiny dimensions so every test runs in seconds on CPU.
    @property
    def __UpperCamelCase (self ):
        return 32

    @property
    def __UpperCamelCase (self ):
        return 32

    @property
    def __UpperCamelCase (self ):
        return self.time_input_dim

    @property
    def __UpperCamelCase (self ):
        return self.time_input_dim * 4

    @property
    def __UpperCamelCase (self ):
        return 1_00

    @property
    def __UpperCamelCase (self ):
        # Tiny multilingual tokenizer checkpoint for tests.
        snake_case_ : Optional[int] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
        return tokenizer

    @property
    def __UpperCamelCase (self ):
        # Seeded tiny MCLIP text encoder in eval mode.
        torch.manual_seed(0 )
        snake_case_ : Union[str, Any] = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
        snake_case_ : List[Any] = MultilingualCLIP(lowercase__ )
        snake_case_ : Tuple = text_encoder.eval()
        return text_encoder

    @property
    def __UpperCamelCase (self ):
        # Seeded tiny conditional UNet; 9 input channels = 4 latents + 4
        # masked-image latents + 1 mask (inpainting layout).
        torch.manual_seed(0 )
        snake_case_ : Dict = {
            """in_channels""": 9,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """text_image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """text_image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        snake_case_ : Union[str, Any] = UNetaDConditionModel(**lowercase__ )
        return model

    @property
    def __UpperCamelCase (self ):
        # Keyword arguments for the tiny VQ decoder below.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def __UpperCamelCase (self ):
        # Seeded tiny VQ model.
        torch.manual_seed(0 )
        snake_case_ : int = VQModel(**self.dummy_movq_kwargs )
        return model

    def __UpperCamelCase (self ):
        # Assemble all tiny components plus a DDIM scheduler for the pipeline.
        snake_case_ : Union[str, Any] = self.dummy_text_encoder
        snake_case_ : Tuple = self.dummy_tokenizer
        snake_case_ : Optional[Any] = self.dummy_unet
        snake_case_ : int = self.dummy_movq
        snake_case_ : Optional[int] = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowercase__ , set_alpha_to_one=lowercase__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowercase__ , )
        snake_case_ : Any = {
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    def __UpperCamelCase (self , lowercase__ , lowercase__=0 ):
        # Deterministic dummy inputs: seeded embeddings, a 64x64 init image,
        # a mask with one zeroed entry, and a device-appropriate generator.
        snake_case_ : Any = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase__ ) ).to(lowercase__ )
        snake_case_ : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowercase__ )
        # create init_image
        snake_case_ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase__ ) ).to(lowercase__ )
        snake_case_ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case_ : Dict = Image.fromarray(np.uinta(lowercase__ ) ).convert("""RGB""" ).resize((2_56, 2_56) )
        # create mask
        snake_case_ : Any = np.ones((64, 64) , dtype=np.floataa )
        snake_case_ : List[Any] = 0
        if str(lowercase__ ).startswith("""mps""" ):
            # MPS does not support device-local generators.
            snake_case_ : Any = torch.manual_seed(lowercase__ )
        else:
            snake_case_ : Dict = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
        snake_case_ : Union[str, Any] = {
            """prompt""": """horse""",
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs

    def __UpperCamelCase (self ):
        # End-to-end fast test: run the pipeline twice (dict and tuple
        # return paths) and compare a 3x3 corner slice with reference values.
        snake_case_ : Union[str, Any] = """cpu"""
        snake_case_ : Dict = self.get_dummy_components()
        snake_case_ : Any = self.pipeline_class(**lowercase__ )
        snake_case_ : Any = pipe.to(lowercase__ )
        pipe.set_progress_bar_config(disable=lowercase__ )
        snake_case_ : Union[str, Any] = pipe(**self.get_dummy_inputs(lowercase__ ) )
        snake_case_ : List[Any] = output.images
        snake_case_ : Any = pipe(
            **self.get_dummy_inputs(lowercase__ ) , return_dict=lowercase__ , )[0]
        snake_case_ : List[str] = image[0, -3:, -3:, -1]
        snake_case_ : List[str] = image_from_tuple[0, -3:, -3:, -1]
        print(f'image.shape {image.shape}' )
        assert image.shape == (1, 64, 64, 3)
        snake_case_ : Dict = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'

    def __UpperCamelCase (self ):
        # Batched vs single inference must agree within a small tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase):
    """Slow GPU integration test: run the full Kandinsky 2.1 prior + inpaint
    pipelines and compare the output with a stored reference image.

    NOTE(review): bindings are mangled to ``snake_case_`` while later lines
    read the upstream names (``pipe_prior``, ``pipeline``, ``output``, ...);
    code left byte-identical pending restoration.
    """
    def __UpperCamelCase (self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCamelCase (self ):
        # Reference output and input cat image from the shared test assets.
        snake_case_ : List[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
        snake_case_ : Optional[Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        # Mask with one zeroed entry; prompt asks to inpaint "a hat".
        snake_case_ : int = np.ones((7_68, 7_68) , dtype=np.floataa )
        snake_case_ : int = 0
        snake_case_ : str = """a hat"""
        snake_case_ : int = KandinskyPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(lowercase__ )
        snake_case_ : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
        snake_case_ : List[str] = pipeline.to(lowercase__ )
        pipeline.set_progress_bar_config(disable=lowercase__ )
        snake_case_ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        snake_case_ , snake_case_ : Union[str, Any] = pipe_prior(
            lowercase__ , generator=lowercase__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        snake_case_ : Optional[int] = pipeline(
            lowercase__ , image=lowercase__ , mask_image=lowercase__ , image_embeds=lowercase__ , negative_image_embeds=lowercase__ , generator=lowercase__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
        snake_case_ : Union[str, Any] = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(lowercase__ , lowercase__ )
| 48
|
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    """Shared assertions: a loaded text Dataset has 4 rows, a single 'text'
    column, and the expected feature dtypes.

    BUG FIX: the original reused one mangled name for both parameters (a
    SyntaxError) and degenerated the first check into ``isinstance(x, x)``;
    the call sites below use ``_check_text_dataset``, restored here.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """Reading a text file into a Dataset works both memory-mapped and
    fully in-memory, with the matching Arrow-memory behaviour.

    BUG FIX: the original mangled the test name (so pytest never collected
    it) and reused one name for all three parameters (a SyntaxError), losing
    the ``text_path``/``tmp_path`` fixture bindings; restored here.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    """Explicit ``features`` override the default string dtype of the 'text'
    column; ``None`` falls back to the default.

    BUG FIX: mangled test name and duplicate parameter names restored (see
    the keep_in_memory test above for the pattern).
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    """The requested split is honoured; ``None`` defaults to 'train'.

    BUG FIX: mangled test name and duplicate parameter names restored.  The
    final assert is also parenthesized: without parentheses it parses as
    ``(dataset.split == split) if split else "train"``, which trivially
    passes for ``split=None``.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    """The reader accepts both a single path string and a list of paths.

    BUG FIX: mangled test name and duplicate parameter names restored; the
    original also assigned mangled locals and then read an undefined path
    variable.
    """
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict: every listed split has 4 rows,
    a single 'text' column, and the expected feature dtypes.

    BUG FIX: the original reused one mangled name for all parameters (a
    SyntaxError) and degenerated the isinstance check; the call sites below
    use ``_check_text_datasetdict``, restored here.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """Reading a {'train': path} mapping yields a DatasetDict, both
    memory-mapped and fully in-memory.

    BUG FIX: mangled test name and duplicate parameter names restored.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    """An explicit `features` schema should override the default string dtype."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    # Build a real `Features` object from the dtype mapping (the mangled original
    # passed the whole dict to `Value` instead of each dtype).
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    """Reading a dict of paths should produce one dataset per requested split."""
    if split:
        path = {split: text_path}
    else:
        # No split requested: load both default splits and check the train one.
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 48
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

# Canonical config.json locations for the public BLOOM checkpoints.
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
    "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
    "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
    "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
    "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
    "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    """Configuration class for BLOOM causal language models.

    Stores the hyper-parameters read by the model; all arguments have
    BLOOM-176B-compatible defaults.
    """

    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for BLOOM, with optional past-key-value inputs."""

    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self):
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        return self._config.n_head

    @property
    def atol_for_validation(self):
        # Absolute tolerance used when validating the exported model's outputs.
        return 1e-3

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        """Build dummy inputs for export, ordered as the model's forward() expects."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask to cover the dummy past positions as well.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self):
        return 13
| 48
|
"""simple docstring"""
from copy import deepcopy
class BinaryIndexedTree:
    """A Fenwick (binary indexed) tree supporting point updates and prefix sums.

    Index 0 is stored separately in ``tree[0]`` because ``0 & -0 == 0`` would
    make the usual next/prev traversal loop forever on it.
    """

    def __init__(self, arr=None, size=None):
        """Build from an initial array, or as an all-zero tree of `size` elements."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        """Initialize the tree in O(n) from `arr` (copied, not aliased)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """Return the underlying array represented by the tree, in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        # Parent in the update direction: add the lowest set bit.
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        # Parent in the query direction: clear the lowest set bit.
        return index - (index & (-index))

    def add(self, index, value):
        """Add `value` to the element at `index`, O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        """Set the element at `index` to `value`, O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right):
        """Return the sum of elements in [0, right), O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        """Return the sum of elements in [left, right), O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        """Return the element at `index`, O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value):
        """Return the largest index i such that prefix(i + 1) <= value, or -1.

        Assumes all elements are non-negative so prefix sums are monotone.
        """
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 48
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    """Configuration for DPT (Dense Prediction Transformer) models.

    Covers both the pure ViT variant and the hybrid variant, where a BiT
    convolutional backbone feeds the transformer (``is_hybrid=True``).
    """

    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            # Pure-ViT mode: no convolutional backbone attributes.
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 48
|
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    """Sort `collection` in place (ascending) and return it.

    Classic insertion sort, but the insertion position is located with binary
    search, reducing comparisons to O(n log n); element shifting stays O(n^2).
    Handles the empty and single-element cases trivially (loop body never runs).
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the insertion point within the sorted prefix [0, i).
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to open a slot at `low`, keeping the sort stable.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 48
| 1
|
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
# Directory containing the fixture feature-extractor configs used by the tests below.
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
# NOTE(review): identifiers in this class look machine-mangled (`__lowercase`,
# `__UpperCamelCase`, every local rebound to `snake_case_`), so the mocked
# response object and the fixture path the tests rely on are never actually
# bound — restore the original names before trusting these tests.
class __lowercase ( unittest.TestCase):
    """simple docstring"""
    def __UpperCamelCase (self ):
        # A mock response for an HTTP head request to emulate server down
        snake_case_ : Union[str, Any] = mock.Mock()
        # presumably: response.status_code = 500, headers/json emulating a failed HEAD — TODO confirm
        snake_case_ : Union[str, Any] = 5_00
        snake_case_ : str = {}
        snake_case_ : Dict = HTTPError
        snake_case_ : List[str] = {}
        # Download this model to make sure it's in the cache.
        snake_case_ : List[str] = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=lowercase__ ) as mock_head:
            snake_case_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def __UpperCamelCase (self ):
        # This test is for deprecated behavior and can be removed in v5
        # (loading a feature extractor directly from a config-file URL).
        snake_case_ : List[str] = WavaVecaFeatureExtractor.from_pretrained(
            """https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""" )
# NOTE(review): identifiers here are machine-mangled as well — all methods share
# the name `__UpperCamelCase` (later defs shadow earlier ones) and locals are
# rebound to `snake_case_` while later code reads `feature_extractor`; restore
# the original names for the suite to run.
@is_staging_test
class __lowercase ( unittest.TestCase):
    """simple docstring"""
    @classmethod
    def __UpperCamelCase (cls ):
        # setUpClass: authenticate against the staging Hub with the test token.
        snake_case_ : List[Any] = TOKEN
        HfFolder.save_token(lowercase__ )
    @classmethod
    def __UpperCamelCase (cls ):
        # tearDownClass: best-effort cleanup of the repos the tests may have created.
        try:
            delete_repo(token=cls._token , repo_id="""test-feature-extractor""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""valid_org/test-feature-extractor-org""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""test-dynamic-feature-extractor""" )
        except HTTPError:
            pass
    def __UpperCamelCase (self ):
        # Push to a user namespace, reload, and compare attribute-by-attribute.
        snake_case_ : Tuple = WavaVecaFeatureExtractor.from_pretrained(lowercase__ )
        feature_extractor.push_to_hub("""test-feature-extractor""" , use_auth_token=self._token )
        snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(lowercase__ , getattr(lowercase__ , lowercase__ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""test-feature-extractor""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                lowercase__ , repo_id="""test-feature-extractor""" , push_to_hub=lowercase__ , use_auth_token=self._token )
        snake_case_ : List[str] = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(lowercase__ , getattr(lowercase__ , lowercase__ ) )
    def __UpperCamelCase (self ):
        # Same round-trip as above, but into an organization namespace.
        snake_case_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(lowercase__ )
        feature_extractor.push_to_hub("""valid_org/test-feature-extractor""" , use_auth_token=self._token )
        snake_case_ : Any = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(lowercase__ , getattr(lowercase__ , lowercase__ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-feature-extractor""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                lowercase__ , repo_id="""valid_org/test-feature-extractor-org""" , push_to_hub=lowercase__ , use_auth_token=self._token )
        snake_case_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(lowercase__ , getattr(lowercase__ , lowercase__ ) )
    def __UpperCamelCase (self ):
        # A custom (dynamic) feature extractor should round-trip through the Hub
        # with the auto_map entry that lets AutoFeatureExtractor resolve it.
        CustomFeatureExtractor.register_for_auto_class()
        snake_case_ : List[Any] = CustomFeatureExtractor.from_pretrained(lowercase__ )
        feature_extractor.push_to_hub("""test-dynamic-feature-extractor""" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""} , )
        snake_case_ : str = AutoFeatureExtractor.from_pretrained(
            f'{USER}/test-dynamic-feature-extractor' , trust_remote_code=lowercase__ )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , """CustomFeatureExtractor""" )
| 48
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """Bundles a Chinese-CLIP image processor and a BERT tokenizer into one processor.

    Text goes through the tokenizer, images through the image processor; when
    both are given, the pixel values are merged into the text encoding.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Accept the legacy kwarg as a fallback for the image processor.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Encode `text` and/or `images`; at least one must be provided."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both sub-processors' input names, deduplicated, order preserved.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
| 48
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XGLM, with fairseq-compatible id mapping."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # The SentencePiece processor is not picklable; serialize its proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Prepend </s>; pairs are joined as `</s> A </s></s> B`."""
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 0/1 mask with 1 marking special tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # XGLM does not use token type ids; everything is segment 0.
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Join sentencepiece tokens back into plain text."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the sentencepiece model into `save_directory` and return its path."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 48
|
"""simple docstring"""
import argparse
import copy
def generate_neighbours(path):
    """Parse an edge-list file into an adjacency dict.

    Each line of the file is `node_a node_b distance`. Returns a dict mapping
    every node to a list of `[neighbour, distance]` pairs (distances kept as
    strings, as the downstream tabu-search code expects).
    """
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            # Split once per line instead of re-splitting for every field.
            node_a, node_b, distance = line.split()[:3]
            if node_a not in dict_of_neighbours:
                dict_of_neighbours[node_a] = [[node_b, distance]]
            else:
                dict_of_neighbours[node_a].append([node_b, distance])
            if node_b not in dict_of_neighbours:
                dict_of_neighbours[node_b] = [[node_a, distance]]
            else:
                dict_of_neighbours[node_b].append([node_a, distance])

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour used as the tabu search's starting point.

    The start node is the first character of the data file. Returns the tour
    (closed back to the start node) and its total distance.
    """
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        # 10000 acts as "infinity"; if no unvisited neighbour exists it is
        # added here and subtracted again after closing the tour below.
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    # Add the closing edge (last visited node back to the start).
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate the 2-swap neighbourhood of `solution`, sorted by tour length.

    Every pair of interior nodes (endpoints excluded) is swapped to build a
    candidate tour; each candidate gets its total distance appended as its
    last element, and the list is sorted ascending by that distance.
    """
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            # Sum the edge lengths along the candidate tour.
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list of length `size`.

    Starting from `first_solution`, each iteration moves to the best 2-swap
    neighbour whose exchanged node pair is not tabu, records that pair in the
    tabu list (FIFO, capped at `size`), and tracks the best tour seen.
    Returns `(best_solution_ever, best_cost)`.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1  # distance is the appended last element

        found = False
        while not found:
            # Find the first position where the candidate differs from the
            # current solution: that identifies the exchanged node pair.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # drop the appended distance
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Move is tabu: fall back to the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    """Build neighbour data, compute a first solution, run tabu search and
    print the best tour.

    NOTE(review): the obfuscated original renamed this def away from the
    ``main(...)`` call in the __main__ block and renamed the ``args``
    parameter while the body still used ``args``; names restored.
    """
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f'Best solution: {best_sol}, with total distance: {best_cost}.')
if __name__ == "__main__":
    # NOTE(review): the parser was originally assigned to `a_` while the
    # add_argument calls used `parser` (a NameError); bind both names so
    # any external reference to `a_` keeps working.
    a_ = parser = argparse.ArgumentParser(description='''Tabu Search''')
    parser.add_argument(
        '''-f''',
        '''--File''',
        type=str,
        help='''Path to the file containing the data''',
        required=True,
    )
    parser.add_argument(
        '''-i''',
        '''--Iterations''',
        type=int,
        help='''How many iterations the algorithm should perform''',
        required=True,
    )
    parser.add_argument(
        '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
    )
    # Pass the arguments to main method
    main(parser.parse_args())
| 48
| 1
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
# NOTE(review): the class below calls `logger.info(...)`, but the logger was
# bound only to the obfuscated name `a_`; expose it under both names.
a_ = logger = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
    """Configuration class for a Mask2Former model (backbone + pixel decoder +
    transformer decoder).

    NOTE(review): this module appears machine-obfuscated. The base class
    ``_UpperCAmelCase`` is not defined here, the module logger is bound to
    ``a_`` rather than ``logger``, ``__init__`` reuses one parameter name for
    every argument (a SyntaxError as written), and the repeated
    ``snake_case_`` locals look like mangled ``self.<attr> = ...``
    assignments. Confirm against the original source before relying on it.
    """

    # Model identifier, supported backbone model types, and the mapping of
    # standard config attribute names to this model's names (presumably
    # ``model_type`` / ``backbones_supported`` / ``attribute_map``).
    _A : List[Any] = """mask2former"""
    _A : List[Any] = ["""swin"""]
    _A : Any = {"""hidden_size""": """hidden_dim"""}

    def __init__(self , lowercase__ = None , lowercase__ = 2_56 , lowercase__ = 2_56 , lowercase__ = 2_56 , lowercase__ = 10_24 , lowercase__ = "relu" , lowercase__ = 6 , lowercase__ = 10 , lowercase__ = 8 , lowercase__ = 0.0 , lowercase__ = 20_48 , lowercase__ = False , lowercase__ = False , lowercase__ = 4 , lowercase__ = 2_55 , lowercase__ = 1_00 , lowercase__ = 0.1 , lowercase__ = 2.0 , lowercase__ = 5.0 , lowercase__ = 5.0 , lowercase__ = 1_25_44 , lowercase__ = 3.0 , lowercase__ = 0.75 , lowercase__ = 0.02 , lowercase__ = 1.0 , lowercase__ = True , lowercase__ = [4, 8, 16, 32] , lowercase__ = None , **lowercase__ , ):
        # Fall back to a default tiny-Swin backbone when none is provided.
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
            snake_case_ : Dict = CONFIG_MAPPING["""swin"""](
                image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowercase__ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
        # A dict backbone config is materialized into its concrete config class.
        if isinstance(lowercase__ , lowercase__ ):
            snake_case_ : int = backbone_config.pop("""model_type""" )
            snake_case_ : Optional[int] = CONFIG_MAPPING[backbone_model_type]
            snake_case_ : int = config_class.from_dict(lowercase__ )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
                f'Supported model types: {",".join(self.backbones_supported )}' )
        # NOTE(review): the assignments below look like obfuscated
        # ``self.<attr> = <param>`` statements; as written they only rebind
        # the local ``snake_case_``.
        snake_case_ : Optional[Any] = backbone_config
        snake_case_ : List[str] = feature_size
        snake_case_ : Optional[Any] = mask_feature_size
        snake_case_ : List[str] = hidden_dim
        snake_case_ : str = encoder_feedforward_dim
        snake_case_ : str = activation_function
        snake_case_ : Optional[int] = encoder_layers
        snake_case_ : Dict = decoder_layers
        snake_case_ : int = num_attention_heads
        snake_case_ : str = dropout
        snake_case_ : Optional[int] = dim_feedforward
        snake_case_ : Tuple = pre_norm
        snake_case_ : List[Any] = enforce_input_projection
        snake_case_ : Tuple = common_stride
        snake_case_ : Optional[int] = ignore_value
        snake_case_ : Tuple = num_queries
        snake_case_ : str = no_object_weight
        snake_case_ : List[str] = class_weight
        snake_case_ : List[str] = mask_weight
        snake_case_ : str = dice_weight
        snake_case_ : Tuple = train_num_points
        snake_case_ : List[str] = oversample_ratio
        snake_case_ : str = importance_sample_ratio
        snake_case_ : str = init_std
        snake_case_ : int = init_xavier_std
        snake_case_ : Tuple = use_auxiliary_loss
        snake_case_ : Optional[int] = feature_strides
        snake_case_ : Any = output_auxiliary_logits
        # NOTE(review): ``decoder_layers`` is consumed twice; upstream the
        # second use feeds a different attribute -- confirm.
        snake_case_ : List[Any] = decoder_layers
        super().__init__(**lowercase__ )

    @classmethod
    def __UpperCamelCase (cls , lowercase__ , **lowercase__ ):
        # Alternate constructor from a backbone config (presumably
        # ``from_backbone_config`` upstream).
        return cls(
            backbone_config=lowercase__ , **lowercase__ , )

    def __UpperCamelCase (self ):
        # Serialize this config, expanding the nested backbone config
        # (presumably ``to_dict`` upstream).
        snake_case_ : Tuple = copy.deepcopy(self.__dict__ )
        snake_case_ : int = self.backbone_config.to_dict()
        snake_case_ : str = self.__class__.model_type
        # NOTE(review): ``output`` is undefined as written; presumably the
        # deepcopy result above -- confirm against the original source.
        return output
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
    """Configuration for a Retrieval-Augmented Generation (RAG) model: a
    composition of a question-encoder config and a generator config.

    NOTE(review): this module appears machine-obfuscated. ``_UpperCAmelCase``
    (both the base class and the docstring constant handed to
    ``add_start_docstrings``) is not defined here, ``**kwargs`` was renamed
    away from ``kwargs`` while the body still reads ``kwargs``, and the
    ``snake_case_`` locals look like mangled ``self.<attr>`` assignments.
    Confirm against the original source before relying on it.
    """

    # Model identifier and composition flag (presumably ``model_type`` and
    # ``is_composition`` upstream).
    _A : Optional[int] = """rag"""
    _A : Optional[Any] = True

    def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ):
        super().__init__(
            bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , )
        # Both sub-configs are mandatory keyword arguments.
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        # Pop the two sub-config dicts and build concrete config objects
        # from their declared model types.
        snake_case_ : List[Any] = kwargs.pop("""question_encoder""" )
        snake_case_ : Tuple = question_encoder_config.pop("""model_type""" )
        snake_case_ : List[str] = kwargs.pop("""generator""" )
        snake_case_ : List[str] = decoder_config.pop("""model_type""" )
        from ..auto.configuration_auto import AutoConfig
        snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ )
        snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ )
        # NOTE(review): the assignments below look like obfuscated
        # ``self.<attr> = <param>`` statements.
        snake_case_ : int = reduce_loss
        snake_case_ : Optional[int] = label_smoothing
        snake_case_ : Dict = exclude_bos_score
        snake_case_ : Union[str, Any] = do_marginalize
        snake_case_ : Union[str, Any] = title_sep
        snake_case_ : int = doc_sep
        snake_case_ : int = n_docs
        snake_case_ : List[str] = max_combined_length
        snake_case_ : Tuple = dataset
        snake_case_ : int = dataset_split
        snake_case_ : str = index_name
        snake_case_ : List[str] = retrieval_vector_size
        snake_case_ : Dict = retrieval_batch_size
        snake_case_ : str = passages_path
        snake_case_ : Union[str, Any] = index_path
        snake_case_ : Tuple = use_dummy_dataset
        snake_case_ : Dict = output_retrieved
        snake_case_ : str = do_deduplication
        snake_case_ : Any = use_cache
        # Inherit the generator's forced EOS token when none was supplied.
        if self.forced_eos_token_id is None:
            snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ )

    @classmethod
    def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ):
        # Build a RAG config from the two sub-configs (presumably
        # ``from_question_encoder_generator_configs`` upstream).
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ )

    def __UpperCamelCase (self ):
        # Serialize this config, expanding both nested sub-configs
        # (presumably ``to_dict`` upstream).
        snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
        snake_case_ : Any = self.question_encoder.to_dict()
        snake_case_ : Dict = self.generator.to_dict()
        snake_case_ : Union[str, Any] = self.__class__.model_type
        # NOTE(review): ``output`` is undefined as written; presumably the
        # deepcopy result above -- confirm against the original source.
        return output
| 48
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
a_ = logging.get_logger(__name__)
def shape_list(tensor):
    """Return the shape of a tensor as a plain Python list.

    Static dimensions come back as ints; dynamic (unknown) dimensions come
    back as scalar tf tensors, so the result is usable at graph-build time.

    NOTE(review): the obfuscated original renamed the parameter while the
    body still used ``tensor`` (a NameError); names restored, and the def is
    renamed to match the ``shape_list(...)`` call site below.
    """
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        # Fully unknown rank: fall back to the dynamic shape tensor.
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits, axis=None, name=None):
    """Wrapper around ``tf.nn.softmax`` that adds a tiny constant to the
    logits first.

    The ``1e-9`` shift works around misbehaviour of softmax under XLA on some
    backends; it is far below float precision for typical logits, so the
    result is unchanged in practice.

    NOTE(review): the obfuscated original renamed all parameters while the
    body still used ``logits``/``axis``/``name`` (a NameError); names restored.
    """
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Layer normalization over a single axis, implemented with
    ``tf.nn.batch_normalization``.

    NOTE(review): reconstructed from obfuscated code in which every
    identifier was replaced; names restored from usage and the upstream
    implementation.

    Args:
        inputs: tensor to normalize.
        weight: 1-D scale tensor for the normalized axis.
        bias: 1-D offset tensor for the normalized axis.
        epsilon: numerical-stability constant added to the variance.
        axis: the single axis to normalize over (int).

    Raises:
        NotImplementedError: if weight/bias are not 1-D or axis is not an int.
    """
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    """Flatten dimensions ``start_dim``..``end_dim`` (inclusive) of a tf
    tensor into one dimension, replicating ``torch.flatten`` semantics.

    NOTE(review): the obfuscated original renamed the parameters while the
    body still used ``input``/``start_dim``/``end_dim``; names restored.
    ``input`` shadows the builtin, matching the torch API it mirrors.
    """
    # Normalize negative dims against the (static) rank.
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask):
    """Broadcast a 2-D or 3-D encoder attention mask to 4-D and invert it
    into an additive mask (0 where attended, ``dtype.min`` where masked).

    NOTE(review): reconstructed from obfuscated code in which every
    identifier was replaced; names restored from usage and the upstream
    implementation.
    """
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor, embed_dim, tensor_name="input_ids"):
    """Assert (via tf.debugging) that every id in ``tensor`` is a valid index
    into an embedding layer with ``embed_dim`` rows.

    NOTE(review): the obfuscated original renamed the parameters while the
    body still used ``tensor``/``embed_dim``/``tensor_name``; names restored.
    """
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    """Save a (possibly large) list attribute onto an HDF5 group, splitting
    it into ``name0``, ``name1``, ... chunks when it exceeds the HDF5 object
    header limit.

    NOTE(review): the obfuscated original bound the 64512 limit to a mangled
    local while the body read ``HDF5_OBJECT_HEADER_LIMIT`` (a NameError);
    names restored from usage.

    Args:
        group: HDF5 group (anything with a dict-like ``.attrs``).
        name: attribute name to save under.
        data: list of values (e.g. layer names) to store.

    Raises:
        RuntimeError: if any single item is larger than the header limit,
            in which case even chunking cannot help.
    """
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            """The following attributes cannot be saved to HDF5 file because """
            f'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
            f'bytes: {bad_attributes}')
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    """Load a list attribute from an HDF5 group, reassembling ``name0``,
    ``name1``, ... chunks written by ``save_attributes_to_hdf5_group`` and
    decoding byte strings to str.

    NOTE(review): the obfuscated original renamed ``group``/``name``/``data``
    inconsistently with their uses; names restored from usage.
    """
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        # Chunked attributes are stored as name0, name1, ... -- gather all.
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]])
            chunk_id += 1
    return data
def expand_1d(data):
    """Expand every rank-1 ``tf.Tensor`` in a nested structure to rank 2 by
    appending a trailing axis; everything else passes through unchanged.

    NOTE(review): the obfuscated original renamed the inner function's
    parameter while its body still used ``t`` (a NameError); names restored.
    """

    def _expand_single_ad_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_ad_tensor, data)
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
# NOTE(review): the class below calls `logger.info(...)`, but the logger was
# bound only to the obfuscated name `a_`; expose it under both names.
a_ = logger = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
    """Configuration for a UPerNet semantic-segmentation model (backbone +
    pyramid pooling + optional auxiliary head).

    NOTE(review): this module appears machine-obfuscated. The base class
    ``_UpperCAmelCase`` is not defined here, the module logger is bound to
    ``a_``, ``__init__`` reuses one parameter name for every argument (a
    SyntaxError as written), and the ``snake_case_`` locals look like
    mangled ``self.<attr>`` assignments. Confirm against the original source.
    """

    # Model identifier (presumably ``model_type`` upstream).
    _A : Optional[int] = """upernet"""

    def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ):
        super().__init__(**lowercase__ )
        # Default to a ResNet backbone when none is given; a dict config is
        # expanded into its concrete config class.
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
            snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
        elif isinstance(lowercase__ , lowercase__ ):
            snake_case_ : Tuple = backbone_config.get("""model_type""" )
            snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type]
            snake_case_ : List[Any] = config_class.from_dict(lowercase__ )
        # NOTE(review): the assignments below look like obfuscated
        # ``self.<attr> = <param>`` statements; as written they only rebind
        # the local ``snake_case_``.
        snake_case_ : List[Any] = backbone_config
        snake_case_ : Optional[Any] = hidden_size
        snake_case_ : Any = initializer_range
        snake_case_ : str = pool_scales
        snake_case_ : Dict = use_auxiliary_head
        snake_case_ : str = auxiliary_loss_weight
        snake_case_ : List[str] = auxiliary_in_channels
        snake_case_ : Optional[Any] = auxiliary_channels
        snake_case_ : Any = auxiliary_num_convs
        snake_case_ : List[Any] = auxiliary_concat_input
        snake_case_ : List[str] = loss_ignore_index

    def __UpperCamelCase (self ):
        # Serialize this config, expanding the nested backbone config
        # (presumably ``to_dict`` upstream).
        snake_case_ : Dict = copy.deepcopy(self.__dict__ )
        snake_case_ : Union[str, Any] = self.backbone_config.to_dict()
        snake_case_ : Any = self.__class__.model_type
        # NOTE(review): ``output`` is undefined as written; presumably the
        # deepcopy result above -- confirm against the original source.
        return output
| 48
| 1
|
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowercase ( unittest.TestCase):
"""simple docstring"""
_A : int = MODEL_FOR_MASKED_LM_MAPPING
_A : Dict = TF_MODEL_FOR_MASKED_LM_MAPPING
def __UpperCamelCase (self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __UpperCamelCase (self ):
snake_case_ : int = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
snake_case_ : Dict = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_80_15, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_55_06, """token_str""": """ accuser"""},
] , )
snake_case_ : Any = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_80_15,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_55_06,
"""token_str""": """ accuser""",
},
] , )
snake_case_ : List[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_36_06, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 34_99, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 29_41, """token_str""": """ Te"""},
] , )
@require_torch
def __UpperCamelCase (self ):
snake_case_ : int = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
snake_case_ : str = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_56_76, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_64_16, """token_str""": """ELS"""},
] , )
snake_case_ : str = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_56_76,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_64_16, """token_str""": """ELS"""},
] , )
snake_case_ : Any = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 34_99, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 29_41, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_36_06, """token_str""": """ Clara"""},
] , )
snake_case_ : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_56_76,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_64_16, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_56_76,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_64_16, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def __UpperCamelCase (self ):
snake_case_ : List[Any] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
snake_case_ : Any = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
@require_torch
def __UpperCamelCase (self ):
snake_case_ : Tuple = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(lowercase__ )
@slow
@require_tf
def __UpperCamelCase (self ):
snake_case_ : List[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Dict = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 6_10, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 15_73, """token_str""": """ Chris"""},
] , )
snake_case_ : Optional[int] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 22_01,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_27_90,
"""token_str""": """ Lyon""",
},
] , )
snake_case_ : Dict = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 34_99, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_36_06, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 29_41, """token_str""": """ Te"""},
] , )
@require_torch
def __UpperCamelCase (self ):
snake_case_ : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
snake_case_ : Optional[Any] = None
snake_case_ : Optional[Any] = None
self.run_pipeline_test(lowercase__ , [] )
@require_tf
def __UpperCamelCase (self ):
snake_case_ : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
snake_case_ : str = None
snake_case_ : str = None
self.run_pipeline_test(lowercase__ , [] )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
snake_case_ : Dict = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ )
snake_case_ : Tuple = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : Optional[int] = fill_masker.tokenizer
snake_case_ : Optional[int] = fill_masker.model
snake_case_ : Any = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase__ , [
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
] , )
snake_case_ : Union[str, Any] = fill_masker([f'This is a {tokenizer.mask_token}'] )
self.assertEqual(
lowercase__ , [
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
] , )
snake_case_ : List[Any] = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
lowercase__ , [
[
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
],
[
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
{"""sequence""": ANY(lowercase__ ), """score""": ANY(lowercase__ ), """token""": ANY(lowercase__ ), """token_str""": ANY(lowercase__ )},
],
] , )
with self.assertRaises(lowercase__ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(lowercase__ ):
fill_masker("""This is""" )
self.run_test_top_k(lowercase__ , lowercase__ )
self.run_test_targets(lowercase__ , lowercase__ )
self.run_test_top_k_targets(lowercase__ , lowercase__ )
self.fill_mask_with_duplicate_targets_and_top_k(lowercase__ , lowercase__ )
self.fill_mask_with_multiple_masks(lowercase__ , lowercase__ )
def run_test_targets(self, model, tokenizer):
    """Exercise FillMaskPipeline's `targets=` as a pipeline kwarg and as a call
    kwarg, check score equivalence, and check the invalid-target error paths.

    NOTE(review): the obfuscated dump declared both parameters as `lowercase__`
    (a SyntaxError); names restored from the call site (L5496-style
    `self.run_test_targets(model, tokenizer)`) and from in-body uses.
    """
    vocab = tokenizer.get_vocab()
    # Two deterministic, guaranteed-in-vocab targets.
    targets = sorted(vocab.keys())[:2]
    # Pipeline argument
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
    outputs = fill_masker(f"This is a {tokenizer.mask_token}")
    self.assertEqual(
        outputs,
        [
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
        ],
    )
    target_ids = {vocab[el] for el in targets}
    self.assertEqual({el["token"] for el in outputs}, target_ids)
    processed_targets = [tokenizer.decode([x]) for x in target_ids]
    self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
    # Call argument
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
    self.assertEqual(
        outputs,
        [
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
        ],
    )
    target_ids = {vocab[el] for el in targets}
    self.assertEqual({el["token"] for el in outputs}, target_ids)
    processed_targets = [tokenizer.decode([x]) for x in target_ids]
    self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
    # Score equivalence
    outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
    tokens = [top_mask["token_str"] for top_mask in outputs]
    scores = [top_mask["score"] for top_mask in outputs]
    # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
    if set(tokens) == set(targets):
        unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
        target_scores = [top_mask["score"] for top_mask in unmasked_targets]
        self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
    # Raises with invalid targets
    with self.assertRaises(ValueError):
        fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
    # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
    if "" not in tokenizer.get_vocab():
        with self.assertRaises(ValueError):
            fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
    with self.assertRaises(ValueError):
        fill_masker(f"This is a {tokenizer.mask_token}", targets="")
def run_test_top_k(self, model, tokenizer):
    """`top_k=2` passed at pipeline-construction time and at call time must
    produce the same (simplified) results.

    NOTE(review): duplicate obfuscated parameters restored to (model, tokenizer);
    the final assertEqual compares the two runs, so two distinct locals are kept.
    """
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
    outputs = fill_masker(f"This is a {tokenizer.mask_token}")
    self.assertEqual(
        outputs,
        [
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
        ],
    )
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
    self.assertEqual(
        outputs2,
        [
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
        ],
    )
    self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def run_test_top_k_targets(self, model, tokenizer):
    """Filtering by `top_k` and re-filtering by the resulting `targets` must
    yield the same results.

    BUG FIX: the sort key was `lambda lowercase__ : x["score"]` — the lambda's
    parameter was never used and `x` was undefined, so any call with more than
    one output raised NameError. The lambda now uses its own parameter.
    """
    vocab = tokenizer.get_vocab()
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    # top_k=2, ntargets=3
    targets = sorted(vocab.keys())[:3]
    outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)
    # If we use the most probable targets, and filter differently, we should still
    # have the same results
    targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
    # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
    if set(targets2).issubset(targets):
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
        # They should yield exactly the same result
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
    """Duplicate entries in `targets` must be deduplicated: with 3 unique
    targets and top_k=10, only 3 predictions can come back."""
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    vocab = tokenizer.get_vocab()
    # String duplicates + id duplicates
    targets = sorted(vocab.keys())[:3]
    targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
    outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
    # The target list contains duplicates, so we can't output more
    # than them
    self.assertEqual(len(outputs), 3)
def fill_mask_with_multiple_masks(self, model, tokenizer):
    """A prompt with three mask tokens returns one top_k-sized list per mask."""
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    outputs = fill_masker(
        f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
    )
    # One list of `top_k` candidate dicts per mask position.
    expected_per_mask = [
        {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
        {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
    ]
    self.assertEqual(outputs, [expected_per_mask, expected_per_mask, expected_per_mask])
| 48
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
a_ = logging.getLogger(__name__)
class __lowercase(_UpperCAmelCase):
    """NER token-classification task over CoNLL-style files.

    NOTE(review): method and variable names restored from the
    `utils_ner.TokenClassificationTask` interface and from in-body uses. In
    the obfuscated dump all three methods were named `__UpperCamelCase`, so
    the later definitions silently shadowed the earlier ones, and both
    parameters of several methods were declared `lowercase__` (a SyntaxError).
    """

    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode):
        """Read `{mode}.txt` from *data_dir* and return a list of InputExample."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    # Sentence boundary: flush accumulated tokens.
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Copy the test file to *writer*, appending one prediction per token."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                # An empty prediction list means this example is fully consumed.
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path):
        """Return label names from *path* (one per line), or CoNLL-2003 defaults."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowercase(_UpperCAmelCase):
    """Chunking task: same as the NER task, but the label lives in the
    second-to-last column and the defaults are CoNLL-2000 chunk tags.

    NOTE(review): method names restored; in the obfuscated dump both methods
    were named `__UpperCamelCase` and `get_labels`'s parameter was unnamed-able
    (body reads `path`).
    """

    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path):
        """Return label names from *path* (one per line), or CoNLL-2000 defaults."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class __lowercase(_UpperCAmelCase):
    """POS-tagging task over CoNLL-U files (parsed with `conllu.parse_incr`).

    NOTE(review): method/variable names restored from the
    `utils_ner.TokenClassificationTask` interface and in-body uses; the
    obfuscated dump used duplicate `lowercase__` parameters (SyntaxError) and
    identical `__UpperCamelCase` method names (mutual shadowing).
    """

    def read_examples_from_file(self, data_dir, mode):
        """Read `{mode}.txt` (CoNLL-U) from *data_dir* into InputExamples."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Re-emit each sentence as `form (upos|prediction)` tuples, one line per sentence."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            # NOTE(review): pops one prediction per token — raises IndexError if
            # preds_list[example_id] is shorter than the sentence; confirm upstream.
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path):
        """Return label names from *path* (one per line), or the 17 UPOS defaults."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 48
| 1
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __lowercase(unittest.TestCase):
    """Tests for MgpstrProcessor (char tokenizer + ViT image processor).

    NOTE(review): method names restored to unittest conventions — the
    obfuscated dump named every method `__UpperCamelCase`, so later
    definitions shadowed earlier ones and only a single method survived.
    Local names restored from in-body uses (e.g. `processor.save_pretrained`
    follows the obfuscated assignment).
    """

    _A : Optional[Any] = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        # NOTE(review): `image_processor_tester` is never defined in this file;
        # this property looks copied from another test — confirm before use.
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a random CHW uint8 array and return it as a PIL image."""
        # BUG FIX: the dump said `np.uinta`, which does not exist — `np.uint8`.
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]
        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
| 48
|
"""simple docstring"""
import random
def rabin_miller(num: int) -> bool:
    """Miller-Rabin probabilistic primality test with 5 random witnesses.

    Returns True if *num* is (probably) prime, False if a witness proves it
    composite. Primes always return True; composites return False with
    overwhelming probability.

    NOTE(review): name restored — the function was called `rabin_miller` at
    its call site but defined under an obfuscated name, and its parameter
    shadowed the function name while the body read `num`.
    """
    # Write num - 1 as 2^t * s with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)  # a^s mod num via fast modular exponentiation
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def _small_primes(limit: int = 1000) -> list:
    """Sieve of Eratosthenes: all primes strictly below *limit*."""
    sieve = [True] * limit
    sieve[0] = sieve[1] = False
    for p in range(2, int(limit**0.5) + 1):
        if sieve[p]:
            sieve[p * p :: p] = [False] * len(sieve[p * p :: p])
    return [i for i, is_p in enumerate(sieve) if is_p]


# The original code listed the 168 primes below 1000 as a 180-line literal;
# the sieve produces the identical list.
LOW_PRIMES = _small_primes()


def is_prime_low_num(num: int) -> bool:
    """Return True if *num* is (probably) prime.

    Fast path: membership / trial division against the primes below 1000;
    anything that survives is handed to the Miller-Rabin test.

    NOTE(review): name restored — the function was called `is_prime_low_num`
    at its call sites but defined under an obfuscated name whose parameter
    shadowed it while the body read `num`.
    """
    if num < 2:
        return False
    if num in LOW_PRIMES:
        return True
    for prime in LOW_PRIMES:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random probable prime with *keysize* bits.

    NOTE(review): name restored — called as `generate_large_prime` in the
    `__main__` block but defined under an obfuscated name; the body reads
    `keysize`.
    """
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
# Demo entry point: generate a 1024-bit probable prime and re-verify it.
if __name__ == "__main__":
    # NOTE(review): the result is bound to `a_` but printed as `num` below —
    # as written this raises NameError; presumably `num = generate_large_prime()`.
    a_ = generate_large_prime()
    # NOTE(review): the extra parentheses print a *tuple*, e.g.
    # ("Prime number:", 123...) — presumably meant print("Prime number:", num).
    print(('''Prime number:''', num))
    print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 48
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the Blenderbot subpackage: maps submodule name to
# the public names it exports. NOTE(review): in the obfuscated dump every
# optional section *reassigned* `a_` to a bare list (clobbering the dict) and
# the final line referenced an undefined `_import_structure`; restored to the
# standard _LazyModule pattern while keeping `a_` as the structure's name.
a_ = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], a_, module_spec=__spec__)
| 48
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

# NOTE(review): names restored — the obfuscated dump assigned both the logger
# and this map to `a_`, so the logger was immediately discarded. `a_` is kept
# as an alias for backward compatibility.
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}
a_ = DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP
class __lowercase(_UpperCAmelCase):
    """Configuration class for DeBERTa-v2 models.

    NOTE(review): parameter/attribute names restored from in-body uses — the
    obfuscated dump declared every parameter as `lowercase__` (duplicate
    argument = SyntaxError) and discarded every attribute into a local
    `snake_case_`. Defaults kept exactly as in the dump.
    """

    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility: `pos_att_type` used to be a "|"-separated string.
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class __lowercase(_UpperCAmelCase):
    """ONNX export configuration for DeBERTa-v2.

    NOTE(review): member names restored from the `OnnxConfig` interface; the
    obfuscated dump named every member `__UpperCamelCase` (mutual shadowing)
    and declared `generate_dummy_inputs` with nine duplicate `lowercase__`
    parameters (SyntaxError). Defaults kept exactly as in the dump.
    """

    @property
    def inputs(self):
        """Input axes: token_type_ids is only exported when the config uses it."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self):
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        num_choices=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=40,
        image_height=40,
        tokenizer=None,
    ):
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        # Drop token_type_ids when the model was configured without them.
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 48
| 1
|
"""simple docstring"""
# Repository hygiene check: flag file paths containing uppercase letters,
# spaces, or hyphens, or living outside any directory; exit non-zero if any.
# NOTE(review): variable names restored — the obfuscated dump assigned every
# list to `a_` while the `if`/`print` lines read `filepaths`, `upper_files`,
# `space_files`, `hyphen_files`, `nodir_files` and `bad_files` (NameError).
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 48
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__(numa: int, numb: int) -> bool:
    """Return True iff *numa* and *numb* have opposite signs.

    Uses the sign bit of the XOR: `(numa ^ numb) < 0` exactly when one operand
    is negative and the other non-negative.

    BUG FIX: the dump declared both parameters as `numa` (duplicate argument =
    SyntaxError) and XOR-ed the value with itself; the second operand is now a
    distinct parameter.

    >>> SCREAMING_SNAKE_CASE__(1, -1)
    True
    >>> SCREAMING_SNAKE_CASE__(1, 1)
    False
    """
    return numa ^ numb < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure for the encoder-decoder subpackage.
# NOTE(review): in the obfuscated dump every optional section *reassigned*
# `a_` to a bare list (clobbering the dict) and the final line referenced an
# undefined `_import_structure`; restored to the standard _LazyModule pattern
# while keeping `a_` as the structure's name.
a_ = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], a_, module_spec=__spec__)
| 48
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): constant names restored — the obfuscated dump assigned all of
# these to `a_` (each overwriting the last) while `ORG_NAME` is read below
# (f"{ORG_NAME}opus-mt-en-de"); SAMPLE_VOCAB / mock_tokenizer_config /
# FRAMEWORK names follow the test-suite convention — confirm against callers.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : str = MarianTokenizer
_A : List[str] = False
_A : List[str] = True
def setUp(self):
    """Build a tiny Marian vocab/config in a temp dir and save a tokenizer there.

    NOTE(review): name restored to unittest's `setUp`; the obfuscated dump
    referenced undefined `lowercase__` constants for the vocab/config/spm
    arguments — restored to the module-level test constants.
    """
    super().setUp()
    vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
    vocab_tokens = dict(zip(vocab, range(len(vocab))))
    save_dir = Path(self.tmpdirname)
    save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
    save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
    if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
        copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["source_spm"])
        copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["target_spm"])
    tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
    tokenizer.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs):
    """Reload the tokenizer saved by setUp (name restored from `self.get_tokenizer()` call sites)."""
    return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
    """Return an (input, expected-output) text pair for round-trip tests."""
    return (
        "This is a test",
        "This is a test",
    )
def test_convert_token_and_id(self):
    """`</s>` maps to id 0 and back."""
    token = "</s>"
    token_id = 0
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def __UpperCamelCase (self ):
snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowercase__ ) , 9 )
def __UpperCamelCase (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __UpperCamelCase (self ):
snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(lowercase__ , batch.input_ids[0] )
snake_case_ : Tuple = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowercase__ )
snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , lowercase__ )
MarianTokenizer.from_pretrained(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : List[str] = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __UpperCamelCase (self ):
# fmt: off
snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 
5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def __UpperCamelCase (self ):
snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
snake_case_ : Dict = """Tämä on testi"""
snake_case_ : List[Any] = """This is a test"""
snake_case_ : Optional[int] = [76, 7, 20_47, 2]
snake_case_ : List[str] = [69, 12, 11, 9_40, 2]
snake_case_ : Any = tokenizer(lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
| 48
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
    """Image processor: shortest-edge resize, center crop, rescale and
    normalize (ImageNet-standard statistics), plus a helper that converts
    segmentation logits to per-pixel label maps.

    NOTE(review): identifiers are machine-mangled — ``__init__`` declares
    every parameter as ``lowercase__`` (duplicate parameter names are a
    SyntaxError) and locals assigned to ``snake_case_`` are read back under
    other names, so this code cannot run as written.
    """

    # Keys this processor produces in its BatchFeature output.
    _A : Optional[Any] = ["""pixel_values"""]

    def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = None , lowercase__ = True , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
        # Defaults: shortest edge 256 for resize, 224x224 center crop, 1/255
        # rescale factor, ImageNet-standard mean/std for normalization.
        super().__init__(**lowercase__ )
        snake_case_ : Optional[int] = size if size is not None else {"""shortest_edge""": 2_56}
        snake_case_ : Any = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        snake_case_ : List[Any] = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
        snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" )
        snake_case_ : str = do_resize
        snake_case_ : Dict = size
        snake_case_ : List[Any] = resample
        snake_case_ : List[Any] = do_center_crop
        snake_case_ : Tuple = crop_size
        snake_case_ : Optional[Any] = do_rescale
        snake_case_ : str = rescale_factor
        snake_case_ : List[str] = do_normalize
        snake_case_ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        snake_case_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
        """Resize so the image's shortest edge equals ``size["shortest_edge"]``
        while preserving aspect ratio."""
        snake_case_ : str = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        snake_case_ : str = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ )
        return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        """Center-crop to exactly ``size["height"] x size["width"]``."""
        snake_case_ : Optional[int] = get_size_dict(lowercase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
        return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ )

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ):
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        """Channel-wise normalize with the given mean and std."""
        return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
        """Full preprocessing pipeline; per-call arguments override the
        defaults stored on the instance."""
        snake_case_ : str = do_resize if do_resize is not None else self.do_resize
        snake_case_ : Dict = size if size is not None else self.size
        snake_case_ : List[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        snake_case_ : Union[str, Any] = resample if resample is not None else self.resample
        snake_case_ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case_ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
        snake_case_ : Any = get_size_dict(lowercase__ , param_name="""crop_size""" )
        snake_case_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ : Dict = do_normalize if do_normalize is not None else self.do_normalize
        snake_case_ : Any = image_mean if image_mean is not None else self.image_mean
        snake_case_ : int = image_std if image_std is not None else self.image_std
        snake_case_ : List[str] = make_list_of_images(lowercase__ )
        if not valid_images(lowercase__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # Each enabled step requires its controlling parameters to be set.
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images]
        if do_resize:
            snake_case_ : Dict = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
        if do_center_crop:
            snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
        if do_rescale:
            snake_case_ : Dict = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
        if do_normalize:
            snake_case_ : Any = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
        snake_case_ : Any = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
        snake_case_ : Any = {"""pixel_values""": images}
        return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )

    def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
        """Convert model ``outputs.logits`` into semantic-segmentation maps,
        optionally bilinearly resized to per-image ``target_sizes``."""
        snake_case_ : Optional[Any] = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(lowercase__ ) != len(lowercase__ ):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(lowercase__ ):
                snake_case_ : List[str] = target_sizes.numpy()
            snake_case_ : Any = []
            for idx in range(len(lowercase__ ) ):
                # Upsample each image's logits to its target size, then argmax
                # over the class dimension to get a label map.
                snake_case_ : str = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=lowercase__ )
                snake_case_ : Tuple = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(lowercase__ )
        else:
            snake_case_ : str = logits.argmax(dim=1 )
            snake_case_ : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 48
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
    """Task template for automatic speech recognition: one audio input column
    and one string transcription column.

    NOTE(review): the field names are mangled — all five class attributes are
    declared as ``_A``, so each annotation clobbers the previous one; upstream
    these are ``task``, ``input_schema``, ``label_schema``, ``audio_column``
    and ``transcription_column``.
    """

    _A : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
    _A : ClassVar[Features] = Features({"""audio""": Audio()})
    _A : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
    _A : str = "audio"
    _A : str = "transcription"

    def __UpperCamelCase (self , lowercase__ ):
        # align_with_features: validate the dataset's features and return a
        # copy of this template whose input schema uses the dataset's actual
        # Audio feature (sampling rate etc.).
        # NOTE(review): the body reads ``features`` but the parameter is the
        # mangled ``lowercase__`` — restore before running.
        if self.audio_column not in features:
            raise ValueError(f'Column {self.audio_column} is not present in features.' )
        if not isinstance(features[self.audio_column] , lowercase__ ):
            raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
        snake_case_ : Optional[int] = copy.deepcopy(self )
        snake_case_ : Tuple = self.input_schema.copy()
        snake_case_ : List[str] = features[self.audio_column]
        snake_case_ : Any = input_schema
        return task_template

    @property
    def __UpperCamelCase (self ):
        # column_mapping: dataset column name -> template role.
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 48
| 1
|
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the Iris dataset and split it into train/test sets for the k-NN demo.
# NOTE(review): all module names are mangled to ``a_`` (each assignment
# clobbers the last), yet later lines read ``data``, ``X`` and ``y`` — restore
# the original names (data, X, y, classes, X_train/X_test/y_train/y_test)
# before running.
a_ = datasets.load_iris()
a_ = np.array(data['''data'''])
a_ = np.array(data['''target'''])
a_ = data['''target_names''']
a_ , a_ , a_ , a_ = train_test_split(X, y)
def euclidean_distance(point_a, point_b):
    """Return the Euclidean (L2) distance between two points.

    The original block declared the function and both parameters as
    ``SCREAMING_SNAKE_CASE__`` — duplicate parameter names are a SyntaxError
    in Python — and the call site below refers to ``euclidean_distance``,
    so the intended names are restored here.

    Args:
        point_a: First point, any sequence of numbers.
        point_b: Second point, same length as ``point_a``.

    Returns:
        float: L2 norm of the difference vector.
    """
    return np.linalg.norm(np.array(point_a) - np.array(point_b))
def classifier(train_data, train_target, classes, point, k=5):
    """Classify ``point`` with a k-nearest-neighbours majority vote.

    The original block named the function and all five parameters
    ``SCREAMING_SNAKE_CASE__`` (duplicate parameter names are a SyntaxError)
    and called an undefined ``euclidean_distance``; the intended names are
    restored and the distance is computed inline so the function is
    self-contained.

    Args:
        train_data: Sequence of training points (feature vectors).
        train_target: Class index for each training point.
        classes: Mapping from class index to class name.
        point: Query point to classify.
        k: Number of neighbours that vote. Defaults to 5.

    Returns:
        The name (entry of ``classes``) of the winning class.
    """
    # Distance from every training sample to the query point, paired with
    # the sample's class index.
    distances = [
        (float(np.linalg.norm(np.array(sample) - np.array(point))), label)
        for sample, label in zip(train_data, train_target)
    ]
    # Class indices of the k closest samples; sorting (distance, label)
    # tuples breaks distance ties by label, as the original did.
    votes = [label for _, label in sorted(distances)[:k]]
    # Most commonly occurring class among the k neighbours wins.
    winner = Counter(votes).most_common(1)[0][0]
    return classes[winner]
if __name__ == "__main__":
    # Demo: predict the species of one unseen iris measurement.
    # NOTE(review): ``X_train``/``y_train``/``classes`` are never bound under
    # the mangled module names above (everything was assigned to ``a_``).
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 48
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
    """Image processor with a PoolFormer-style ``crop_pct`` resize: the image
    is first resized by ``size / crop_pct`` and then center-cropped, followed
    by rescale and normalization with ImageNet default statistics.

    NOTE(review): identifiers are machine-mangled — ``__init__`` and the other
    methods declare every parameter as ``lowercase__`` (duplicate parameter
    names are a SyntaxError) and locals assigned to ``snake_case_`` are read
    back under other names, so this code cannot run as written.
    """

    # Keys this processor produces in its BatchFeature output.
    _A : int = ["""pixel_values"""]

    def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
        # Defaults: shortest edge 224, crop_pct 0.9, 224x224 crop, 1/255
        # rescale, ImageNet-default mean/std.
        super().__init__(**lowercase__ )
        snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24}
        snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
        snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" )
        snake_case_ : Union[str, Any] = do_resize
        snake_case_ : List[str] = size
        snake_case_ : str = crop_pct
        snake_case_ : str = resample
        snake_case_ : Optional[Any] = do_center_crop
        snake_case_ : Dict = crop_size
        snake_case_ : int = do_rescale
        snake_case_ : Optional[int] = rescale_factor
        snake_case_ : str = do_normalize
        snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
        """Resize; when ``crop_pct`` is given, scale the requested size up by
        ``1 / crop_pct`` first so a later center crop lands on the target."""
        snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
        if crop_pct is not None:
            if "shortest_edge" in size:
                snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct )
            elif "height" in size and "width" in size:
                # Square sizes stay scalar; rectangles scale both dimensions.
                if size["height"] == size["width"]:
                    snake_case_ : Dict = int(size["""height"""] / crop_pct )
                else:
                    snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
            else:
                raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
            snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ )
        else:
            if "shortest_edge" in size:
                snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ )
            elif "height" in size and "width" in size:
                snake_case_ : int = (size["""height"""], size["""width"""])
            else:
                raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
        return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        """Center-crop to exactly ``size["height"] x size["width"]``."""
        snake_case_ : int = get_size_dict(lowercase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
        return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ )

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        """Channel-wise normalize with the given mean and std."""
        return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
        """Full preprocessing pipeline; per-call arguments override the
        defaults stored on the instance."""
        snake_case_ : str = do_resize if do_resize is not None else self.do_resize
        snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct
        snake_case_ : List[Any] = resample if resample is not None else self.resample
        snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize
        snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean
        snake_case_ : int = image_std if image_std is not None else self.image_std
        snake_case_ : List[Any] = size if size is not None else self.size
        snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size
        snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" )
        snake_case_ : List[str] = make_list_of_images(lowercase__ )
        if not valid_images(lowercase__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # Each enabled step requires its controlling parameters to be set.
        # NOTE(review): ``do_resize and size is None or resample is None``
        # binds as ``(do_resize and size is None) or resample is None`` —
        # probably intended to be parenthesized; confirm against upstream.
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_center_crop and crop_pct is None:
            raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images]
        if do_resize:
            snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images]
        if do_center_crop:
            snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
        if do_rescale:
            snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
        if do_normalize:
            snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
        snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
        snake_case_ : Dict = {"""pixel_values""": images}
        return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
| 48
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
# Map of canonical Falcon checkpoints to their hosted config files.
# NOTE(review): both module constants are mangled to ``a_`` (the second
# clobbers the first); upstream these are ``logger`` and
# ``FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP``.
a_ = {
    '''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
    '''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class __lowercase ( _UpperCAmelCase):
    """Configuration for Falcon models.

    The original block was machine-mangled: every ``__init__`` parameter was
    named ``lowercase__`` (duplicate parameter names are a SyntaxError), the
    body assigned to throwaway locals instead of ``self`` attributes, both
    class attributes were named ``_A`` (the second clobbered the first) and
    both properties shared one mangled name. The parameter/attribute names
    below are restored from what the body reads (and match the upstream
    Falcon configuration); defaults are kept exactly as in the original.
    """

    # Model identifier used by the auto classes.
    model_type = """falcon"""
    # Keys dropped when serializing inference outputs.
    keys_to_ignore_at_inference = ["""past_key_values"""]

    def __init__(
        self,
        vocab_size=6_50_24,
        hidden_size=45_44,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with the legacy ``n_embed`` kwarg.
        n_embed = kwargs.pop("""n_embed""", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Fall back to one KV head per attention head when not specified.
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        """Dimension of a single attention head."""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        """Rotary position embeddings are used whenever ALiBi is disabled."""
        return not self.alibi
| 48
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
# NOTE(review): every module constant below is mangled to ``a_`` (each
# assignment clobbers the previous one); upstream these are ``logger``,
# ``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP``,
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`` and ``FAIRSEQ_LANGUAGE_CODES``,
# all of which the tokenizer class below references — restore before running.
# Local filenames used when saving a tokenizer.
a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
# Hosted vocabulary/tokenizer files per checkpoint.
a_ = {
    '''vocab_file''': {
        '''facebook/mbart-large-en-ro''': (
            '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
        ),
        '''facebook/mbart-large-cc25''': (
            '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
        ),
    },
    '''tokenizer_file''': {
        '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
        '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
    },
}
# Maximum sequence length per checkpoint.
a_ = {
    '''facebook/mbart-large-en-ro''': 1024,
    '''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
# The 25 fairseq language-code tokens appended as special tokens.
a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowercase ( _UpperCAmelCase):
    """Fast (tokenizers-backed) MBart tokenizer with fairseq language codes.

    Sequence format: ``<tokens> </s> <src_lang_code>`` on the source side and
    ``<tokens> </s> <tgt_lang_code>`` on the target side (codes as suffix).

    NOTE(review): identifiers are machine-mangled — locals assigned to
    ``snake_case_`` are read back under other names, several methods share the
    mangled name ``__UpperCamelCase`` (later defs clobber earlier ones), and
    the class-attribute names ``_A`` collide; restore before running.
    """

    # Upstream these attrs are vocab_files_names, max_model_input_sizes,
    # pretrained_vocab_files_map, model_input_names, slow_tokenizer_class and
    # the prefix/suffix token id lists.
    _A : Dict = VOCAB_FILES_NAMES
    _A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    _A : str = ["""input_ids""", """attention_mask"""]
    _A : Tuple = MBartTokenizer
    _A : List[int] = []
    _A : List[int] = []

    def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ):
        # Mask token behave like a normal word, i.e. include the space before it
        snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
        super().__init__(
            vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , )
        snake_case_ : Dict = vocab_file
        # The slow tokenizer can only be saved when the original spm file is known.
        snake_case_ : Optional[int] = False if not self.vocab_file else True
        # Register the fairseq language codes as additional special tokens.
        snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
        snake_case_ : Any = {
            lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX"""
        snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang )
        snake_case_ : Tuple = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    @property
    def __UpperCamelCase (self ):
        # src_lang getter.
        return self._src_lang

    @src_lang.setter
    def __UpperCamelCase (self , lowercase__ ):
        # Changing src_lang re-applies the source special-token template.
        snake_case_ : Tuple = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
        # build_inputs_with_special_tokens: wrap ids with the current
        # prefix/suffix (eos + language code) tokens.
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
        # create_token_type_ids_from_sequences: mBART uses all-zero type ids.
        snake_case_ : List[Any] = [self.sep_token_id]
        snake_case_ : Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ):
        # _build_translation_inputs: encode raw text for generation and attach
        # the target-language id as ``forced_bos_token_id``.
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        snake_case_ : int = src_lang
        snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
        snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ )
        snake_case_ : Union[str, Any] = tgt_lang_id
        return inputs

    def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ):
        # prepare_seq2seq_batch with mBART's default en_XX -> ro_RO pair.
        snake_case_ : List[str] = src_lang
        snake_case_ : int = tgt_lang
        return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )

    def __UpperCamelCase (self ):
        # _switch_to_input_mode
        return self.set_src_lang_special_tokens(self.src_lang )

    def __UpperCamelCase (self ):
        # _switch_to_target_mode
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def __UpperCamelCase (self , lowercase__ ):
        # set_src_lang_special_tokens: no prefix, suffix = [eos, src_lang_code];
        # rebuild the backend post-processor accordingly.
        snake_case_ : int = self.convert_tokens_to_ids(lowercase__ )
        snake_case_ : Tuple = []
        snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
        snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
        snake_case_ : Optional[int] = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def __UpperCamelCase (self , lowercase__ ):
        # set_tgt_lang_special_tokens: same shape as the source variant but
        # with the target language code.
        snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ )
        snake_case_ : Optional[int] = []
        snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
        snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
        snake_case_ : List[Any] = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
        # save_vocabulary: copy the original sentencepiece model next to the
        # saved tokenizer; only possible when the slow-tokenizer file is known.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(lowercase__ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
            return
        snake_case_ : List[str] = os.path.join(
            lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
            copyfile(self.vocab_file , lowercase__ )
        return (out_vocab_file,)
| 48
| 1
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """Builds small TimmBackbone configs and inputs for the test suite below.

    Fixes: the obfuscated class was named `__lowercase` while the test class
    instantiates `TimmBackboneModelTester(self)`; `__init__` and
    `create_and_check_model` declared duplicate parameter names (SyntaxError);
    and every method shared the name `__UpperCamelCase`, so all but the last
    were shadowed.  Names restored to what the callers in this file use.
    """

    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        # Default to the last stage when the caller gives no explicit indices.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values) for a forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Build a TimmBackboneConfig from the tester's settings."""
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        """Run the backbone and check the last feature map's shape."""
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # 32px input through resnet50 downsampling — NOTE(review): the (14, 14)
        # spatial size is kept from the original assertion; confirm upstream.
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        """Adapter used by the shared ModelTesterMixin machinery."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
    """Test suite for the TimmBackbone wrapper (model, pipeline and backbone mixins).

    NOTE(review): obfuscation renamed every test method to ``__UpperCamelCase`` (so
    earlier definitions are shadowed by later ones and unittest discovers none of
    them as ``test_*``), renamed the mixin flags to ``_A``, and left several
    ``lowercase__`` references unbound — the original method/attribute names need
    to be restored for this suite to actually run.
    """
    # Mixin configuration flags (original names lost to obfuscation).
    _A : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
    _A : Union[str, Any] = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
    _A : Optional[Any] = False
    _A : Dict = False
    _A : List[Any] = False
    _A : int = False

    def __UpperCamelCase (self ):
        # setUp: build the model tester and a ConfigTester.
        # NOTE(review): `lowercase__` is unbound here — original args were the
        # config class and has_text_modality=False; confirm upstream.
        snake_case_ : List[str] = TimmBackboneModelTester(self )
        snake_case_ : int = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ )

    def __UpperCamelCase (self ):
        # Run the standard configuration round-trip checks.
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __UpperCamelCase (self ):
        # Compare a timm-loaded backbone against the equivalent transformers one.
        snake_case_ : Optional[int] = """resnet18"""
        snake_case_ : Union[str, Any] = """microsoft/resnet-18"""
        snake_case_ : Optional[Any] = AutoBackbone.from_pretrained(lowercase__ , use_timm_backbone=lowercase__ )
        snake_case_ : Optional[Any] = AutoBackbone.from_pretrained(lowercase__ )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,) )
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        snake_case_ : Tuple = AutoBackbone.from_pretrained(lowercase__ , use_timm_backbone=lowercase__ , out_indices=[1, 2, 3] )
        snake_case_ : List[Any] = AutoBackbone.from_pretrained(lowercase__ , out_indices=[1, 2, 3] )
        self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )

    @unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
    def __UpperCamelCase (self ):
        pass

    @unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
    def __UpperCamelCase (self ):
        pass

    @unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
    def __UpperCamelCase (self ):
        pass

    @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
    def __UpperCamelCase (self ):
        pass

    @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
    def __UpperCamelCase (self ):
        pass

    @unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
    def __UpperCamelCase (self ):
        pass

    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def __UpperCamelCase (self ):
        pass

    @unittest.skip("""model weights aren't tied in TimmBackbone.""" )
    def __UpperCamelCase (self ):
        pass

    @unittest.skip("""model weights aren't tied in TimmBackbone.""" )
    def __UpperCamelCase (self ):
        pass

    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def __UpperCamelCase (self ):
        pass

    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def __UpperCamelCase (self ):
        pass

    @unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
    def __UpperCamelCase (self ):
        pass

    @unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
    def __UpperCamelCase (self ):
        pass

    @unittest.skip("""Safetensors is not supported by timm.""" )
    def __UpperCamelCase (self ):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def __UpperCamelCase (self ):
        pass

    def __UpperCamelCase (self ):
        # Check that the forward signature starts with `pixel_values`.
        snake_case_ , snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : Optional[int] = model_class(lowercase__ )
            snake_case_ : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ : List[str] = [*signature.parameters.keys()]
            snake_case_ : Dict = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowercase__ )

    def __UpperCamelCase (self ):
        # Gradient retention: hidden states (and attentions, if any) must get grads.
        snake_case_ , snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Union[str, Any] = True
        snake_case_ : List[str] = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        snake_case_ : str = self.all_model_classes[0]
        snake_case_ : str = model_class(lowercase__ )
        model.to(lowercase__ )
        snake_case_ : Optional[Any] = self._prepare_for_class(lowercase__ , lowercase__ )
        snake_case_ : List[Any] = model(**lowercase__ )
        snake_case_ : str = outputs[0][-1]
        # Encoder-/Decoder-only models
        snake_case_ : int = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            snake_case_ : str = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=lowercase__ )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )

    def __UpperCamelCase (self ):
        # Feature maps/channels must follow out_indices; also check the
        # defaults (single last stage) and fresh-weight initialization paths.
        snake_case_ , snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : Any = model_class(lowercase__ )
            model.to(lowercase__ )
            model.eval()
            snake_case_ : Tuple = model(**lowercase__ )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            snake_case_ : str = copy.deepcopy(lowercase__ )
            snake_case_ : List[str] = None
            snake_case_ : Tuple = model_class(lowercase__ )
            model.to(lowercase__ )
            model.eval()
            snake_case_ : str = model(**lowercase__ )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            snake_case_ : Dict = copy.deepcopy(lowercase__ )
            snake_case_ : Any = False
            snake_case_ : List[str] = model_class(lowercase__ )
            model.to(lowercase__ )
            model.eval()
            snake_case_ : Any = model(**lowercase__ )
| 48
|
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash:
    """SHA-1 over a bytes message, verified against hashlib.sha1.

    Fixes: all five methods of the obfuscated class were named
    ``__UpperCamelCase``, so only the last survived and ``final_hash`` crashed
    on ``self.padding``/``self.rotate``; locals were collapsed to
    ``snake_case_``.  The class is renamed to ``SHAaHash`` because that is the
    name the surrounding script actually calls.
    """

    def __init__(self, data):
        # `data` is the raw bytes message; `h` is the standard SHA-1 IV.
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit value `n` by `b` bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the message to a multiple of 64 bytes with the length appended."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand one 64-byte block into the 80-word schedule."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the 80-round compression over every block; return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                # Round function and constant depend on the round quarter.
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def SCREAMING_SNAKE_CASE__():
    """Self-test: our SHA-1 must match hashlib on a sample message.

    Fixes: ``hashlib.shaa`` does not exist (obfuscated ``sha1``) — this
    function raised AttributeError on every call.
    """
    msg = b"Test String"
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main():
    """CLI entry point: hash a --string argument or the bytes of a --file.

    Fixes: the guard below calls ``main()`` which did not exist (the function
    was obfuscated to ``SCREAMING_SNAKE_CASE__``), and the locals
    ``args``/``input_string``/``hash_input`` were never bound.
    """
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaHash(hash_input).final_hash())
if __name__ == "__main__":
    # Hash the CLI input, then run the module's doctests (kept from upstream).
    main()
    import doctest

    doctest.testmod()
| 48
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module-level logger shared by all conversion helpers below.
# Fixes: every constant here was obfuscated to `a_`, while the functions below
# reference `logger`, `MAPPING` and `TOP_LEVEL_KEYS`.
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF UniSpeech parameter-name fragment.
# A "*" stands for the encoder layer index, filled in during loading.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top level of the HF model (no "unispeech." prefix).
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Copy one fairseq tensor `value` into the HF module reached by dotted `key`.

    hf_pointer:   HF model (walked attribute-by-attribute along `key`)
    key:          dotted attribute path, already mapped to HF naming
    value:        tensor from the fairseq state dict
    full_name:    original fairseq parameter name, for logging/assert messages
    weight_type:  "weight" / "weight_g" / "weight_v" / "bias" or None
    is_finetuned: fine-tuned checkpoints drop pretraining-only heads

    Fixes: the obfuscated signature repeated one parameter name six times
    (SyntaxError) and the body discarded every assignment into `snake_case_`,
    so no weight was ever written.  Restored from the upstream conversion
    script this file was derived from.
    """
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Map every tensor of the fairseq checkpoint onto the HF UniSpeech model.

    Conv feature-extractor weights go through `load_conv_layer`; everything
    else is matched against MAPPING and written via `set_recursively`.
    Unmatched tensors are collected and reported as a warning.

    Fixes: duplicate parameter names (SyntaxError) and locals collapsed to
    `snake_case_` — the mapped key, weight type, and `is_used` flag were all
    lost in the obfuscated version.  Name restored to match the caller.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Most weights live under the `unispeech` submodule.
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one conv feature-extractor tensor into the HF feature extractor.

    type_id 0 addresses the conv itself; type_id 2 addresses the layer norm
    (only present on every layer without group norm, or on layer 0 with it).
    Anything else is recorded in `unused_weights`.

    Fixes: duplicate parameter names (SyntaxError) and `snake_case_`-collapsed
    assignments — the obfuscated body never actually wrote into the model.
    Name restored to match the caller in `recursively_load_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq UniSpeech checkpoint to the HF format and save it.

    Fixes: duplicate parameter names (SyntaxError), `snake_case_`-collapsed
    locals, and the function name the `__main__` guard calls
    (`convert_unispeech_checkpoint`) did not exist.  Body restored from the
    upstream conversion script — NOTE(review): the tokenizer/feature-extractor
    keyword values (do_lower_case, do_normalize) were lost in obfuscation and
    are reconstructed; confirm against upstream.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI wrapper around convert_unispeech_checkpoint.
    # Fixes: the parser and parsed args were obfuscated to `a_`, so
    # `parser.add_argument(...)` and `args.checkpoint_path` raised NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 48
|
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
    """Manim scene animating a sharded checkpoint being loaded into CPU memory.

    NOTE(review): obfuscation discarded most locals into ``snake_case_`` and left
    many ``lowercase__`` references unbound (e.g. the arrange directions, the
    ``fill`` rectangle, and the lists iterated below), so this scene cannot render
    as-is — the original variable names must be recovered from upstream.
    """

    def __UpperCamelCase (self ):
        # Build the CPU block: two columns of six memory cells plus a label.
        snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
        snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
        snake_case_ : str = [mem.copy() for i in range(6 )]
        snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
        snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(lowercase__ )
        # GPU block: four memory cells plus a label.
        snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
        snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
        snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        gpu.move_to([-1, -1, 0] )
        self.add(lowercase__ )
        # Model block: six memory cells plus a label.
        snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
        snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : Dict = Text("""Model""" , font_size=24 )
        snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        model.move_to([3, -1.0, 0] )
        self.add(lowercase__ )
        # Small yellow target rectangles marking where model cells map into CPU.
        snake_case_ : Dict = []
        for i, rect in enumerate(lowercase__ ):
            rect.set_stroke(lowercase__ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
            self.add(lowercase__ )
            cpu_targs.append(lowercase__ )
        # Loaded-checkpoint block plus the key/legend in the corner.
        snake_case_ : List[str] = [mem.copy() for i in range(6 )]
        snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
        snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        snake_case_ : Optional[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        snake_case_ : Union[str, Any] = MarkupText(
            f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(lowercase__ , lowercase__ )
        snake_case_ : List[Any] = MarkupText(
            f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
        blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        # Step caption, then animate the checkpoint cells flowing into the CPU.
        snake_case_ : List[Any] = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(lowercase__ ) , Write(lowercase__ ) )
        self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
        snake_case_ : Optional[int] = []
        snake_case_ : List[str] = []
        for i, rect in enumerate(lowercase__ ):
            snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
            target.move_to(lowercase__ )
            first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
            snake_case_ : List[Any] = target.copy()
            cpu_target.generate_target()
            # First five shards go to the left CPU column, the rest to the right.
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
        self.play(*lowercase__ )
        self.play(*lowercase__ )
        self.wait()
| 48
| 1
|
"""simple docstring"""
def merge_sort(collection):
    """Sort `collection` in place-ish by repeatedly extracting its min and max.

    (Two-way selection sort; the `merge_sort` name is kept because the
    `__main__` guard below calls it by that name — the obfuscated def was
    unreachable.)  The input list is consumed; a new sorted list is returned.

    Fixes: all locals were collapsed to `snake_case_`, so the function removed
    the maximum twice (ValueError) and built garbage lists.
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    # Maxima were appended largest-first; reverse to ascending before joining.
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    # Read a comma-separated list of ints from stdin and print it sorted.
    # Fixes: both locals were obfuscated to `a_`, so `user_input` and
    # `unsorted` raised NameError.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*merge_sort(unsorted), sep=',')
| 48
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Quicksort `a[start:end+1]` in place; return the number of comparisons.

    Fixes: the recursive calls and the call at module scope reference
    `_in_place_quick_sort`, which did not exist (the def was obfuscated), and
    the pivot-swap locals were collapsed to `snake_case_`.
    """
    count = 0
    if start < end:
        # Move a random pivot to the end before partitioning.
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Tuple = 0
snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = a[end]
snake_case_ : List[Any] = a[pivot]
snake_case_ : Optional[Any] = temp
snake_case_ : List[str] = start - 1
for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case_ : Any = new_pivot_index + 1
snake_case_ : Tuple = a[new_pivot_index]
snake_case_ : Optional[int] = a[index]
snake_case_ : Tuple = temp
snake_case_ : Union[str, Any] = a[new_pivot_index + 1]
snake_case_ : Union[str, Any] = a[end]
snake_case_ : Union[str, Any] = temp
return new_pivot_index + 1, count
# Demo: count quicksort comparisons over normally-distributed samples.
# Fixes: every binding was obfuscated to `a_`, so `mu`, `sigma`, `p`, `X`,
# `outfile`, `M`, `r` and `z` (all referenced below) raised NameError.
outfile = TemporaryFile()
p = 100  # 1000 elements are to be sorted  (NOTE(review): comment says 1000, value is 100)
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    'is :'
)
print(z)
| 48
| 1
|
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
# Fixes: this flag was obfuscated to `a_` while the guard further down reads
# `is_python_no_less_than_3_10` (NameError).
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Any=None ):
"""simple docstring"""
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ )
@dataclass
class __lowercase :
    """Example dataclass with four required fields, used by the parser tests.

    NOTE(review): obfuscation renamed every field to ``_A``; repeated
    annotations of the same name collapse into a single field — the original
    field names (foo/bar/baz/flag per the test at the bottom of the file) need
    restoring for the tests to pass.
    """
    _A : int
    _A : float
    _A : str
    _A : bool
@dataclass
class __lowercase :
    """Example dataclass with defaulted fields (int default and help metadata).

    NOTE(review): both fields were renamed to ``_A`` by obfuscation, so only
    the last annotation survives as an actual field.
    """
    _A : int = 42
    _A : str = field(default="""toto""" , metadata={"""help""": """help message"""})
@dataclass
class __lowercase :
    """Example dataclass exercising bool parsing (False/True/None defaults).

    NOTE(review): field names collapsed to ``_A`` by obfuscation.
    """
    _A : bool = False
    _A : bool = True
    _A : Optional[bool] = None
class __lowercase ( _UpperCAmelCase):
    """String-valued enum with two members, used for choice parsing.

    NOTE(review): member names collapsed to ``_A`` — only the last assignment
    survives as a distinct class attribute.
    """
    _A : int = """titi"""
    _A : str = """toto"""
class __lowercase ( _UpperCAmelCase):
    """Enum mixing string and int members, for make_choice_type_function tests.

    NOTE(review): member names collapsed to ``_A`` by obfuscation.
    """
    _A : int = """titi"""
    _A : Dict = """toto"""
    _A : Tuple = 42
@dataclass
class __lowercase :
    """Dataclass with an enum-typed field, converted to the enum post-parse.

    NOTE(review): the field was renamed to ``_A`` while the method still reads
    ``self.foo``, and the ``BasicEnum`` annotation refers to a class whose name
    was also obfuscated away — both need restoring.
    """
    _A : BasicEnum = "toto"

    def __UpperCamelCase (self ):
        # __post_init__-style conversion of the raw string into the enum.
        snake_case_ : int = BasicEnum(self.foo )
@dataclass
class __lowercase :
    """Dataclass with a mixed-type enum field, converted post-parse.

    NOTE(review): same obfuscation damage as the sibling above — ``self.foo``
    and ``MixedTypeEnum`` are unresolved under the current names.
    """
    _A : MixedTypeEnum = "toto"

    def __UpperCamelCase (self ):
        snake_case_ : Optional[Any] = MixedTypeEnum(self.foo )
@dataclass
class __lowercase :
    """Dataclass of Optional scalar and list fields (all default to empty/None).

    NOTE(review): field names collapsed to ``_A`` by obfuscation.
    """
    _A : Optional[int] = None
    _A : Optional[float] = field(default=_UpperCAmelCase , metadata={"""help""": """help message"""})
    _A : Optional[str] = None
    _A : Optional[List[str]] = list_field(default=[])
    _A : Optional[List[int]] = list_field(default=[])
@dataclass
class __lowercase :
    """Dataclass of list-typed fields with non-trivial defaults.

    NOTE(review): field names collapsed to ``_A`` by obfuscation.
    """
    _A : List[int] = list_field(default=[])
    _A : List[int] = list_field(default=[1, 2, 3])
    _A : List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
    _A : List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class __lowercase :
    """Dataclass whose fields are all required (bare ``field()`` defaults).

    NOTE(review): the method reads ``self.required_enum``, which no longer
    exists since every field was renamed to ``_A`` by obfuscation.
    """
    _A : List[int] = field()
    _A : str = field()
    _A : BasicEnum = field()

    def __UpperCamelCase (self ):
        snake_case_ : List[Any] = BasicEnum(self.required_enum )
@dataclass
class __lowercase :
    """Dataclass whose annotations are string literals (forward references).

    NOTE(review): field names collapsed to ``_A`` by obfuscation.
    """
    _A : int
    _A : "BasicEnum" = field()
    _A : "Optional[bool]" = None
    _A : "str" = field(default="""toto""" , metadata={"""help""": """help message"""})
    _A : "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
# PEP 604 (`X | None`) variants of the examples above — only definable on 3.10+.
if is_python_no_less_than_3_10:
    @dataclass
    class __lowercase :
        """Bool-field example using ``bool | None`` syntax (3.10+ only).

        NOTE(review): field names collapsed to ``_A`` by obfuscation.
        """
        _A : bool = False
        _A : bool = True
        _A : bool | None = None

    @dataclass
    class __lowercase :
        """Optional-field example using ``| None`` syntax (3.10+ only).

        NOTE(review): field names collapsed to ``_A`` by obfuscation.
        """
        _A : int | None = None
        _A : float | None = field(default=_UpperCAmelCase , metadata={"""help""": """help message"""})
        _A : str | None = None
        _A : list[str] | None = list_field(default=[])
        _A : list[int] | None = list_field(default=[])
class __lowercase ( unittest.TestCase):
    """Unit tests for HfArgumentParser: turning dataclasses into argparse arguments.

    NOTE(review): this class is heavily mangled. Several signatures reuse the name
    ``lowercase__`` for multiple distinct parameters (a SyntaxError in Python), and
    locals are collapsed onto ``snake_case_`` while later statements reference the
    original names (``parser``, ``expected``, ``args``, ``example``, ``enum_ex``,
    ``temp_local_path`` ...). The original identifiers must be restored before this
    suite can run; comments below describe the evident intent of each test.
    """
    def __UpperCamelCase (self , lowercase__ , lowercase__ ):
        # Compare two argparse parsers action by action, ignoring each action's
        # back-reference to its containing parser. Mixed-type "choices" install a
        # custom conversion callable as "type"; callables never compare equal, so
        # they are compared by the results they produce instead.
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            snake_case_ : List[str] = {k: v for k, v in vars(lowercase__ ).items() if k != """container"""}
            snake_case_ : Optional[Any] = {k: v for k, v in vars(lowercase__ ).items() if k != """container"""}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("""choices""" , lowercase__ ) and yy.get("""choices""" , lowercase__ ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["""type"""](lowercase__ ) , yy["""type"""](lowercase__ ) )
                del xx["type"], yy["type"]
            self.assertEqual(lowercase__ , lowercase__ )
    def __UpperCamelCase (self ):
        # Basic example: required int/float/str fields plus an optional bool flag.
        snake_case_ : Any = HfArgumentParser(lowercase__ )
        snake_case_ : str = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , type=lowercase__ , required=lowercase__ )
        expected.add_argument("""--bar""" , type=lowercase__ , required=lowercase__ )
        expected.add_argument("""--baz""" , type=lowercase__ , required=lowercase__ )
        expected.add_argument("""--flag""" , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs="""?""" )
        self.argparsersEqual(lowercase__ , lowercase__ )
        snake_case_ : Union[str, Any] = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
        ((snake_case_) , ) : Union[str, Any] = parser.parse_args_into_dataclasses(lowercase__ , look_for_args_file=lowercase__ )
        self.assertFalse(example.flag )
    def __UpperCamelCase (self ):
        # Fields with defaults become optional arguments carrying those defaults.
        snake_case_ : List[str] = HfArgumentParser(lowercase__ )
        snake_case_ : Union[str, Any] = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , default=42 , type=lowercase__ )
        expected.add_argument("""--baz""" , default="""toto""" , type=lowercase__ , help="""help message""" )
        self.argparsersEqual(lowercase__ , lowercase__ )
    def __UpperCamelCase (self ):
        # Boolean fields: string-to-bool parsing plus the auto-generated --no_* negation.
        snake_case_ : Union[str, Any] = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs="""?""" )
        expected.add_argument("""--baz""" , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs="""?""" )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("""--no_baz""" , action="""store_false""" , default=lowercase__ , dest="""baz""" )
        expected.add_argument("""--opt""" , type=lowercase__ , default=lowercase__ )
        snake_case_ : int = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(lowercase__ )
        for dataclass_type in dataclass_types:
            snake_case_ : Any = HfArgumentParser(lowercase__ )
            self.argparsersEqual(lowercase__ , lowercase__ )
            snake_case_ : int = parser.parse_args([] )
            self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
            snake_case_ : Optional[int] = parser.parse_args(["""--foo""", """--no_baz"""] )
            self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
            snake_case_ : Optional[Any] = parser.parse_args(["""--foo""", """--baz"""] )
            self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
            snake_case_ : Optional[Any] = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
            self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
            snake_case_ : Union[str, Any] = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
            self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
    def __UpperCamelCase (self ):
        # Enum field: choices come from the enum values; the dataclass maps the
        # parsed raw value back onto the enum member (including int member 42).
        snake_case_ : Optional[Any] = HfArgumentParser(lowercase__ )
        snake_case_ : Any = argparse.ArgumentParser()
        expected.add_argument(
            """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
        self.argparsersEqual(lowercase__ , lowercase__ )
        snake_case_ : Union[str, Any] = parser.parse_args([] )
        self.assertEqual(args.foo , """toto""" )
        snake_case_ : Dict = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        snake_case_ : Optional[Any] = parser.parse_args(["""--foo""", """titi"""] )
        self.assertEqual(args.foo , """titi""" )
        snake_case_ : int = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        snake_case_ : Any = parser.parse_args(["""--foo""", """42"""] )
        self.assertEqual(args.foo , 42 )
        snake_case_ : List[Any] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def __UpperCamelCase (self ):
        # A Literal annotation behaves like the enum case: fixed choices, mixed types.
        @dataclass
        class __lowercase :
            """simple docstring"""
            _A : Literal["titi", "toto", 42] = "toto"
        snake_case_ : List[Any] = HfArgumentParser(lowercase__ )
        snake_case_ : Tuple = argparse.ArgumentParser()
        expected.add_argument(
            """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
        self.argparsersEqual(lowercase__ , lowercase__ )
        snake_case_ : Tuple = parser.parse_args([] )
        self.assertEqual(args.foo , """toto""" )
        snake_case_ : Optional[int] = parser.parse_args(["""--foo""", """titi"""] )
        self.assertEqual(args.foo , """titi""" )
        snake_case_ : List[str] = parser.parse_args(["""--foo""", """42"""] )
        self.assertEqual(args.foo , 42 )
    def __UpperCamelCase (self ):
        # List fields become nargs="+" arguments carrying their declared defaults.
        snake_case_ : str = HfArgumentParser(lowercase__ )
        snake_case_ : List[str] = argparse.ArgumentParser()
        expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=lowercase__ )
        expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=lowercase__ )
        expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowercase__ )
        expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=lowercase__ )
        self.argparsersEqual(lowercase__ , lowercase__ )
        snake_case_ : Optional[Any] = parser.parse_args([] )
        self.assertEqual(
            lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
        snake_case_ : Optional[int] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
        self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
    def __UpperCamelCase (self ):
        # Optional[...] fields default to None and parse to the underlying type.
        snake_case_ : Dict = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , default=lowercase__ , type=lowercase__ )
        expected.add_argument("""--bar""" , default=lowercase__ , type=lowercase__ , help="""help message""" )
        expected.add_argument("""--baz""" , default=lowercase__ , type=lowercase__ )
        expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=lowercase__ )
        expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=lowercase__ )
        snake_case_ : int = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(lowercase__ )
        for dataclass_type in dataclass_types:
            snake_case_ : Dict = HfArgumentParser(lowercase__ )
            self.argparsersEqual(lowercase__ , lowercase__ )
            snake_case_ : int = parser.parse_args([] )
            self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) )
            snake_case_ : Optional[int] = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
            self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
    def __UpperCamelCase (self ):
        # Fields without defaults must be required on the command line.
        snake_case_ : List[Any] = HfArgumentParser(lowercase__ )
        snake_case_ : Tuple = argparse.ArgumentParser()
        expected.add_argument("""--required_list""" , nargs="""+""" , type=lowercase__ , required=lowercase__ )
        expected.add_argument("""--required_str""" , type=lowercase__ , required=lowercase__ )
        expected.add_argument(
            """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowercase__ , )
        self.argparsersEqual(lowercase__ , lowercase__ )
    def __UpperCamelCase (self ):
        # String-literal (forward-reference) annotations resolve like real types.
        snake_case_ : Union[str, Any] = HfArgumentParser(lowercase__ )
        snake_case_ : List[str] = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , type=lowercase__ , required=lowercase__ )
        expected.add_argument(
            """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowercase__ , )
        expected.add_argument("""--opt""" , type=lowercase__ , default=lowercase__ )
        expected.add_argument("""--baz""" , default="""toto""" , type=lowercase__ , help="""help message""" )
        expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowercase__ )
        self.argparsersEqual(lowercase__ , lowercase__ )
    def __UpperCamelCase (self ):
        # parse_dict builds the dataclass instance straight from a mapping.
        snake_case_ : List[str] = HfArgumentParser(lowercase__ )
        snake_case_ : Tuple = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        snake_case_ : List[Any] = parser.parse_dict(lowercase__ )[0]
        snake_case_ : Optional[int] = BasicExample(**lowercase__ )
        self.assertEqual(lowercase__ , lowercase__ )
    def __UpperCamelCase (self ):
        # Unknown keys raise when allow_extra_keys is disabled.
        snake_case_ : List[str] = HfArgumentParser(lowercase__ )
        snake_case_ : Tuple = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
            """extra""": 42,
        }
        self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ )
    def __UpperCamelCase (self ):
        # Round-trip through a JSON file on disk.
        # NOTE(review): parse_yaml_file is invoked on a .json payload here —
        # presumably parse_json_file was intended; confirm against the implementation.
        snake_case_ : Optional[int] = HfArgumentParser(lowercase__ )
        snake_case_ : Optional[Any] = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case_ : int = os.path.join(lowercase__ , """temp_json""" )
            os.mkdir(lowercase__ )
            with open(temp_local_path + """.json""" , """w+""" ) as f:
                json.dump(lowercase__ , lowercase__ )
            snake_case_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
        snake_case_ : List[Any] = BasicExample(**lowercase__ )
        self.assertEqual(lowercase__ , lowercase__ )
    def __UpperCamelCase (self ):
        # Round-trip through a YAML file on disk.
        snake_case_ : Optional[Any] = HfArgumentParser(lowercase__ )
        snake_case_ : int = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case_ : Tuple = os.path.join(lowercase__ , """temp_yaml""" )
            os.mkdir(lowercase__ )
            with open(temp_local_path + """.yaml""" , """w+""" ) as f:
                yaml.dump(lowercase__ , lowercase__ )
            snake_case_ : Any = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
        snake_case_ : Optional[int] = BasicExample(**lowercase__ )
        self.assertEqual(lowercase__ , lowercase__ )
    def __UpperCamelCase (self ):
        # Smoke test: the parser can at least be constructed for the example dataclass.
        snake_case_ : int = HfArgumentParser(lowercase__ )
        self.assertIsNotNone(lowercase__ )
| 48
|
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( vertices_number : int , probability : float , directed : bool = False ):
    """Generate a random graph as an adjacency dict {vertex: [neighbors]}.

    Fixes vs. the original: the three parameters all carried the same mangled
    name (a SyntaxError), and the adjacency dict was bound to a throwaway local
    while later lines referenced an undefined ``graph``.

    Args:
        vertices_number: number of vertices (labelled 0 .. vertices_number - 1).
        probability: edge probability; >= 1 yields a complete graph, <= 0 an
            edgeless graph.
        directed: when False (default), every generated edge is mirrored.

    Returns:
        dict mapping each vertex to its list of adjacent vertices.
    """
    graph : dict = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        # NOTE: complete_graph is the sibling helper defined below in this module.
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number falls below the edge probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, add the reverse edge j -> i as well
                    graph[j].append(i )
    return graph
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
    """Build the complete graph on the given number of vertices.

    Returns an adjacency dict mapping each vertex 0..n-1 to a list of every
    other vertex.
    """
    vertex_count = SCREAMING_SNAKE_CASE__
    adjacency = {}
    for node in range(vertex_count):
        adjacency[node] = [peer for peer in range(vertex_count) if peer != node]
    return adjacency
if __name__ == "__main__":
    # Run this module's doctests when it is executed directly.
    import doctest
    doctest.testmod()
| 48
| 1
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
a_ = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
a_ = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
a_ = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __lowercase ( datasets.Metric):
    """TER (Translation Edit Rate) metric, wrapping sacrebleu's TER scorer.

    NOTE(review): the compute method below reuses the mangled name ``lowercase__``
    for every parameter (a SyntaxError) and its body references the original names
    (``references``, ``predictions`` ...); the original signature — presumably
    (predictions, references, normalized, ignore_punct, support_zh_ja_chars,
    case_sensitive) — must be restored.
    """
    def __UpperCamelCase (self ):
        # Metric metadata; the TER scorer requires sacrebleu >= 1.4.12.
        if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
            raise ImportWarning(
                """To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
                """You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
                } ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
                """https://github.com/jhclark/tercom""",
            ] , )
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = False , ):
        # All predictions must have the same number of references; references are
        # transposed into sacrebleu's "one stream per reference" layout before scoring.
        snake_case_ : int = len(references[0] )
        if any(len(lowercase__ ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        snake_case_ : Dict = [[refs[i] for refs in references] for i in range(lowercase__ )]
        snake_case_ : int = TER(
            normalized=lowercase__ , no_punct=lowercase__ , asian_support=lowercase__ , case_sensitive=lowercase__ , )
        snake_case_ : Dict = sb_ter.corpus_score(lowercase__ , lowercase__ )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 48
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
a_ = logging.get_logger(__name__)
# Map of canonical DPR checkpoint names to their hosted config.json URLs
# (context encoders, question encoders, and readers; NQ single-set and multiset).
a_ = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-ctx_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
    ),
}
class __lowercase ( _UpperCAmelCase):
    """Configuration for DPR (Dense Passage Retrieval) encoder/reader models.

    Defaults mirror the BERT-base architecture. Fixes vs. the original: every
    ``__init__`` parameter carried the same mangled name (a SyntaxError) and every
    assignment dropped its value into one throwaway local instead of setting an
    instance attribute. Parameter names were reconstructed from the visible
    defaults and assignment order (standard DPRConfig layout) — confirm against
    the upstream definition.
    """
    _A : Optional[int] = """dpr"""
    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 48
| 1
|
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
a_ = '''src/diffusers'''
# Matches is_xxx_available()
a_ = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
a_ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
a_ = '''
{0} = None
'''
a_ = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
a_ = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
    """Return the backend name(s) encoded in an ``is_xxx_available()`` line.

    Returns None when the line mentions no backend; multiple backends on one
    line are joined with ``_and_``.

    Fix vs. the original: the regex matches were bound to a throwaway local while
    the length test and the join were applied to the raw input line instead.
    """
    # NOTE: _re_backend is the module-level regex defined above.
    backends = _re_backend.findall(SCREAMING_SNAKE_CASE__ )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def SCREAMING_SNAKE_CASE__ ( ):
    """Parse the package __init__.py and collect the objects guarded by each backend.

    Returns a dict mapping backend name -> list of object names imported in that
    backend's ``else:`` (dummy) branch.

    NOTE(review): ``os.path.join(SCREAMING_SNAKE_CASE__, ...)`` and the two
    ``len(SCREAMING_SNAKE_CASE__)`` calls below reference the function itself due
    to name mangling; presumably the package-path constant and ``lines`` were
    intended — confirm against the original script.
    """
    with open(os.path.join(SCREAMING_SNAKE_CASE__ , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        snake_case_ : str = f.readlines()
    # Get to the point we do the actual imports for type checking
    snake_case_ : Any = 0
    snake_case_ : str = {}
    # Go through the end of the file
    while line_index < len(SCREAMING_SNAKE_CASE__ ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        snake_case_ : Any = find_backend(lines[line_index] )
        if backend is not None:
            # Skip ahead to the backend's `else:` (dummy) branch.
            while not lines[line_index].startswith("""else:""" ):
                line_index += 1
            line_index += 1
            snake_case_ : Optional[Any] = []
            # Until we unindent, add backend objects to the list
            while line_index < len(SCREAMING_SNAKE_CASE__ ) and len(lines[line_index] ) > 1:
                snake_case_ : int = lines[line_index]
                snake_case_ : Optional[int] = _re_single_line_import.search(SCREAMING_SNAKE_CASE__ )
                if single_line_import_search is not None:
                    # "from x import a, b" on one line: split the imported names.
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 8 ):
                    # Continuation line of a multi-line import: strip indent and trailing ",\n".
                    objects.append(line[8:-2] )
                line_index += 1
            if len(SCREAMING_SNAKE_CASE__ ) > 0:
                snake_case_ : int = objects
        else:
            line_index += 1
    return backend_specific_objects
def SCREAMING_SNAKE_CASE__ ( name : str , backend_name : str ):
    """Create the dummy-module code for one object, keyed on its name's casing.

    ALL-CAPS names become dummy constants, all-lowercase names dummy functions,
    and anything else (CamelCase) a dummy class.

    Fix vs. the original: both parameters carried the same mangled name (a
    SyntaxError in Python), so the second shadowed the first.
    """
    # NOTE: DUMMY_CONSTANT / DUMMY_FUNCTION / DUMMY_CLASS are the module-level
    # templates defined above.
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int]=None ):
    """Build the text of every dummy_<backend>_objects.py file.

    Returns a dict mapping backend name -> generated file content.

    NOTE(review): the body references ``backend_specific_objects`` and
    ``dummy_file``/``dummy_files`` while the parameter and locals are mangled to
    other names; presumably the parameter was ``backend_specific_objects`` and the
    accumulator dict ``dummy_files`` — confirm against the original script.
    """
    if backend_specific_objects is None:
        snake_case_ : Any = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    snake_case_ : Any = {}
    for backend, objects in backend_specific_objects.items():
        # Render the backend list literal, e.g. ["torch"] or ["torch", "transformers"].
        snake_case_ : Optional[Any] = """[""" + """, """.join(f'"{b}"' for b in backend.split("""_and_""" ) ) + """]"""
        snake_case_ : str = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for o in objects] )
        snake_case_ : str = dummy_file
    return dummy_files
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict=False ):
    """Compare generated dummy files against the ones on disk.

    With overwrite=True the on-disk files are rewritten; otherwise a mismatch
    raises a ValueError instructing the user to run `make fix-copies`.

    NOTE(review): the parameter is mangled but the f-strings below reference
    ``short_names.get(backend, backend)`` semantics through mangled names;
    presumably the parameter was ``overwrite`` and the joins used the repo-path
    constant — confirm against the original script.
    """
    snake_case_ : List[str] = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    snake_case_ : Optional[int] = {"""torch""": """pt"""}
    # Locate actual dummy modules and read their content.
    snake_case_ : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , """utils""" )
    snake_case_ : Dict = {
        backend: os.path.join(SCREAMING_SNAKE_CASE__ , f'dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}_objects.py' )
        for backend in dummy_files.keys()
    }
    snake_case_ : Optional[Any] = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
            with open(SCREAMING_SNAKE_CASE__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                snake_case_ : List[str] = f.read()
        else:
            # Missing file on disk: treat its current content as empty.
            snake_case_ : Any = """"""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}_objects.py as the main '
                    """__init__ has new objects.""" )
                with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    """The main __init__ has objects that are not present in """
                    f'diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}_objects.py. Run `make fix-copies` '
                    """to fix this.""" )
if __name__ == "__main__":
    # CLI entry point: `--fix_and_overwrite` rewrites the dummy-object files in place.
    # NOTE(review): the parser is assigned to the mangled name ``a_`` but referenced
    # as ``parser``/``args``, and ``check_dummies`` is the pre-mangling function
    # name — restore consistent names before running as a script.
    a_ = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    a_ = parser.parse_args()
    check_dummies(args.fix_and_overwrite)
| 48
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Splitter for tokenization: any character that is not a letter, digit, or underscore.
# NOTE(review): presumably bound to NON_ALPHA before mangling — the functions below use that name.
a_ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
# NOTE(review): presumably MIN_NUM_TOKENS and NUM_PERM before mangling.
a_ = 10
a_ = 256
def SCREAMING_SNAKE_CASE__ ( tokens : List[str] ):
    """Compute a MinHash signature over a list of tokens.

    Returns None when there are fewer than MIN_NUM_TOKENS tokens (too short to
    deduplicate meaningfully).

    Fix vs. the original: the token list itself was passed as ``num_perm`` to
    MinHash; the module-level NUM_PERM constant is the intended permutation count.
    """
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
    """Split the input on non-identifier characters and return the set of
    non-blank tokens."""
    tokens = set()
    # NON_ALPHA is the module-level "not [A-Za-z_0-9]" splitter regex.
    for piece in NON_ALPHA.split(SCREAMING_SNAKE_CASE__ ):
        if len(piece.strip() ) > 0:
            tokens.add(piece )
    return tokens
class __lowercase :
    """MinHash-LSH index that groups near-duplicate code files into clusters.

    NOTE(review): heavily mangled — methods reuse ``lowercase__`` for multiple
    parameters (a SyntaxError) and ``defaultdict(lowercase__)`` receives the
    jaccard-threshold float as the default factory, where ``defaultdict(set)``
    was presumably intended (``.add`` is called on the values below). Restore the
    original identifiers before use.
    """
    def __init__(self , *,
        lowercase__ = 0.85 , ):
        # Keyword-only Jaccard threshold; permutation count comes from NUM_PERM.
        snake_case_ : Tuple = duplication_jaccard_threshold
        snake_case_ : Optional[Any] = NUM_PERM
        snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        snake_case_ : List[Any] = defaultdict(lowercase__ )
    def __UpperCamelCase (self , lowercase__ , lowercase__ ):
        # Insert a (key, min_hash) pair; near-duplicates found by the LSH query
        # are folded into an existing cluster, or start a new one.
        snake_case_ : int = self._index.query(lowercase__ )
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}' )
            return
        self._index.insert(lowercase__ , lowercase__ )
        if len(lowercase__ ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(lowercase__ )
                    break
            else:
                # No existing cluster matched: anchor a new one on the first close duplicate.
                self._duplicate_clusters[close_duplicates[0]].add(lowercase__ )
    def __UpperCamelCase (self ):
        # Materialize clusters as lists of {"base_index", "repo_name", "path"} dicts,
        # each cluster including its base key.
        snake_case_ : str = []
        for base, duplicates in self._duplicate_clusters.items():
            snake_case_ : Optional[Any] = [base] + list(lowercase__ )
            # reformat the cluster to be a list of dict
            snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(lowercase__ )
        return duplicate_clusters
    def __UpperCamelCase (self , lowercase__ ):
        # Dump the clusters to a JSON file at the given path.
        snake_case_ : int = self.get_duplicate_clusters()
        with open(lowercase__ , """w""" ) as f:
            json.dump(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
    """Worker helper: compute the MinHash for one (index, row) element.

    Returns ((index, repo_name, path), min_hash), or None implicitly when the
    file is too short to hash.

    NOTE(review): mangled — the annotated tuple-unpack below is invalid syntax,
    and ``element``/``data``/``min_hash``/``get_min_hash`` are pre-mangling names
    not bound here; presumably ``index, data = element`` was intended — confirm.
    """
    snake_case_ , snake_case_ : str = element
    snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ):
    """Yield (key, min_hash) pairs for a dataset, hashing rows in a process pool.

    Rows whose hash is None (too short) are skipped; ThreadedIterator keeps the
    pool fed ahead of consumption.
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
            if data is not None:
                yield data
def SCREAMING_SNAKE_CASE__ ( dataset_iterator : "Type[Dataset]" , jaccard_threshold : float ):
    """Build duplicate clusters for a dataset by MinHash-LSH indexing.

    Fix vs. the original: both parameters carried the same mangled name (a
    SyntaxError), and the (filename, min_hash) pair was fed to ``di.add`` under
    that same mangled name.

    Args:
        dataset_iterator: the dataset to deduplicate (iterated with enumerate).
        jaccard_threshold: similarity threshold for the LSH index.

    Returns:
        The list of duplicate clusters from the index.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    # NOTE: minhash_iter is the sibling generator defined above.
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=1_0_0 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE__ ( code_a : str , code_b : str ):
    """Return the Jaccard similarity between the token sets of two code strings.

    Fix vs. the original: both parameters carried the same mangled name (a
    SyntaxError) and both token sets were bound to one throwaway local while the
    return line referenced undefined names.
    """
    # NOTE: get_tokens is the NON_ALPHA tokenizer defined earlier in this module.
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
# NOTE(review): presumably the ``_shared_dataset`` global that find_extremes sets
# for its multiprocessing workers — confirm the pre-mangling name.
a_ = None
def SCREAMING_SNAKE_CASE__ ( cluster , jaccard_threshold ):
    """Reduce one duplicate cluster to its 'extreme' representatives.

    An element joins the extremes list only if it is not similar (>= threshold)
    to any extreme collected so far; otherwise the matching extreme's "copies"
    counter is incremented. Reads file contents from the module-level
    ``_shared_dataset`` set by the pool initializer.

    Fix vs. the original: both parameters carried the same mangled name (a
    SyntaxError) and the body referenced ``cluster``/``extremes``/
    ``jaccard_threshold`` that were never bound.
    """
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a["""base_index"""]]["""content"""]
        for element_b in extremes:
            code_b = _shared_dataset[element_b["""base_index"""]]["""content"""]
            if jaccard_similarity(code_a , code_b ) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            # Not similar to any current extreme: becomes a new extreme itself.
            element_a["copies"] = 1
            extremes.append(element_a )
    return extremes
def SCREAMING_SNAKE_CASE__(cluster_list, dataset, jaccard_threshold):
    """Compute the extreme elements of every cluster in ``cluster_list`` in parallel.

    Publishes ``dataset`` through the module-level ``_shared_dataset`` so worker
    processes can read contents without pickling the whole dataset per task.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    # Freeze the threshold so the pool only has to ship each cluster.
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def SCREAMING_SNAKE_CASE__(dataset, jaccard_threshold=0.85):
    """Deduplicate ``dataset`` with MinHash/Jaccard clustering.

    Keeps only the "extreme" representative of each duplicate cluster, annotates
    every cluster element with ``is_extreme`` and ``copies``, prints summary
    statistics, and returns ``(filtered_dataset, duplicate_clusters)``.
    """
    # Fix: restore all local bindings (obfuscated to `snake_case_`) and pass
    # `with_indices=True` so the filter lambda actually receives the index.
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    # Drop every duplicate that is not a cluster representative.
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f'Original dataset size: {len(dataset)}')
    print(f'Number of duplicate clusters: {len(duplicate_clusters)}')
    print(f'Files in duplicate cluster: {len(duplicate_indices)}')
    print(f'Unique files in duplicate cluster: {len(extreme_dict)}')
    print(f'Filtered dataset size: {len(ds_filter)}')
    return ds_filter, duplicate_clusters
| 48
| 1
|
"""simple docstring"""
from PIL import Image
def SCREAMING_SNAKE_CASE__(img, level):
    """Return a copy of ``img`` with its contrast changed by ``level``.

    ``level`` ranges over roughly -255..255; 0 leaves the image unchanged.
    Uses the standard contrast factor (259*(level+255))/(255*(259-level)) and
    maps every channel value c to int(128 + factor*(c-128)) via ``Image.point``.
    """
    # Fix: the obfuscated source bound the factor to a throwaway name and passed
    # the (unbound) obfuscated name to img.point instead of the inner `contrast`.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Pivot around mid-gray (128) so level=0 is the identity mapping.
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change contrast to 170
        # NOTE(review): the result is bound to `a_` here but saved through `cont_img`
        # on the next line — these names look inconsistent; confirm the intended binding.
        a_ = change_contrast(img, 170)
        cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 48
|
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
# Configure root logging once at import time: timestamped, INFO-level messages.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# NOTE(review): the script body below logs through `logger`, but this binding is
# named `a_` — looks like an obfuscation artifact; confirm the intended name.
a_ = logging.getLogger(__name__)
if __name__ == "__main__":
    # Count token occurrences over a binarized MLM dataset and dump a
    # vocab-sized count vector (used to smooth masking probabilities).
    parser = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=30522, type=int)
    # Fix: every binding below was obfuscated to `a_` while the code read
    # `args`, `data`, `counter` and `counts` — restore the real names.
    args = parser.parse_args()

    logger.info(f'Loading data from {args.data_file}')
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Densify the Counter into a list indexed by token id (0 for unseen ids).
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f'Dump to {args.token_counts_dump}')
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 48
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
    """simple docstring"""
    # Composite (RAG-style) configuration: wraps a question-encoder config and a
    # generator config plus retrieval / loss hyper-parameters.
    # NOTE(review): obfuscated source — both class attributes below share the
    # name `_A` (the second clobbers the first), every __init__ parameter shares
    # the name `lowercase__` (a SyntaxError as written), and all attribute
    # assignments bind `snake_case_` instead of `self.<attr>`. The original
    # names must be recovered before this class can work; left byte-identical here.
    _A : Optional[int] = """rag"""
    _A : Optional[Any] = True
    def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ):
        super().__init__(
            bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , )
        # Both sub-configs must arrive through **kwargs as plain dicts.
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        snake_case_ : List[Any] = kwargs.pop("""question_encoder""" )
        snake_case_ : Tuple = question_encoder_config.pop("""model_type""" )
        snake_case_ : List[str] = kwargs.pop("""generator""" )
        snake_case_ : List[str] = decoder_config.pop("""model_type""" )
        # Imported lazily to avoid a circular import with the auto-config module.
        from ..auto.configuration_auto import AutoConfig
        snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ )
        snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ )
        snake_case_ : int = reduce_loss
        snake_case_ : Optional[int] = label_smoothing
        snake_case_ : Dict = exclude_bos_score
        snake_case_ : Union[str, Any] = do_marginalize
        snake_case_ : Union[str, Any] = title_sep
        snake_case_ : int = doc_sep
        snake_case_ : int = n_docs
        snake_case_ : List[str] = max_combined_length
        snake_case_ : Tuple = dataset
        snake_case_ : int = dataset_split
        snake_case_ : str = index_name
        snake_case_ : List[str] = retrieval_vector_size
        snake_case_ : Dict = retrieval_batch_size
        snake_case_ : str = passages_path
        snake_case_ : Union[str, Any] = index_path
        snake_case_ : Tuple = use_dummy_dataset
        snake_case_ : Dict = output_retrieved
        snake_case_ : str = do_deduplication
        snake_case_ : Any = use_cache
        # Fall back to the generator's forced EOS token when none was given.
        if self.forced_eos_token_id is None:
            snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ )
    @classmethod
    def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ):
        # Alternate constructor: build from two already-instantiated sub-configs.
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ )
    def __UpperCamelCase (self ):
        # Serialize to a plain dict, expanding both nested sub-configs.
        snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
        snake_case_ : Any = self.question_encoder.to_dict()
        snake_case_ : Dict = self.generator.to_dict()
        snake_case_ : Union[str, Any] = self.__class__.model_type
        return output
| 48
|
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE__(dataset, expected_features):
    """Shared assertions for a text ``Dataset`` produced by the readers below.

    Fix: both parameters were obfuscated to the same name (a SyntaxError) and the
    isinstance check compared the dataset against itself instead of ``Dataset``.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def SCREAMING_SNAKE_CASE__(keep_in_memory, text_path, tmp_path):
    """Reading a text file into a Dataset honors ``keep_in_memory``.

    Fix: restore the parameter/local names (duplicated/obfuscated in the source).
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def SCREAMING_SNAKE_CASE__(features, text_path, tmp_path):
    """An explicit ``features`` schema is applied when reading a text file.

    Fix: restore the parameter/local names (duplicated/obfuscated in the source).
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def SCREAMING_SNAKE_CASE__(split, text_path, tmp_path):
    """The requested ``split`` is propagated to the resulting Dataset.

    Fix: restore the parameter/local names (duplicated/obfuscated in the source).
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    # NOTE(review): when `split` is falsy this assertion reduces to `assert "train"`
    # (always true) due to operator precedence — kept as in the original test.
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def SCREAMING_SNAKE_CASE__(path_type, text_path, tmp_path):
    """A single path and a list of paths are both accepted by the reader.

    Fix: restore the parameter/local names (duplicated/obfuscated in the source);
    the two ``issubclass`` checks select the path representation under test.
    """
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def SCREAMING_SNAKE_CASE__(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a text ``DatasetDict``: checks each requested split.

    Fix: parameters were obfuscated to one duplicated name (a SyntaxError) and the
    isinstance check compared the dict against itself instead of ``DatasetDict``.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def SCREAMING_SNAKE_CASE__(keep_in_memory, text_path, tmp_path):
    """Reading a dict of text files into a DatasetDict honors ``keep_in_memory``.

    Fix: restore the parameter/local names (duplicated/obfuscated in the source).
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def SCREAMING_SNAKE_CASE__(features, text_path, tmp_path):
    """An explicit ``features`` schema is applied when reading into a DatasetDict.

    Fix: restore the parameter/local names (duplicated/obfuscated in the source).
    """
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def SCREAMING_SNAKE_CASE__(split, text_path, tmp_path):
    """Each key of the input path mapping becomes a split of the DatasetDict.

    Fix: restore the parameter/local names (duplicated/obfuscated in the source);
    with no explicit split, both a "train" and a "test" split are created.
    """
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 48
| 1
|
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE__(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``.

    Classic house-robber dynamic programming: track the best sum that includes
    the current element and the best sum that excludes it. Returns 0 for an
    empty list (and never less than 0, matching the original behavior).
    Fix: the obfuscated source bound the DP state to throwaway names
    (`snake_case_`) while the loop read `max_including`/`max_excluding`.
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # New "including" must skip the previous element; new "excluding" keeps the best so far.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 48
|
"""simple docstring"""
from copy import deepcopy
class __lowercase:
    """Fenwick tree (Binary Indexed Tree) over an integer array.

    Index 0 is stored separately in ``tree[0]``; indices >= 1 use the classic
    BIT layout, giving O(log n) point updates and prefix/range sums.

    Fix: in the obfuscated source every method was renamed to the same
    identifier (so later defs clobbered earlier ones) while the bodies still
    called the real names (``self.init``, ``self.next_``, ``self.prev``,
    ``self.add``, ``self.get``, ``self.prefix``); those method names and the
    ``self.size``/``self.tree`` attribute bindings are restored here.
    """

    def __init__(self, arr=None, size=None):
        # Build either from an existing array or as `size` zeros.
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        """Build the tree in O(n) from ``arr``."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """Return the underlying array (inverse of :meth:`init`)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        # Next index covered by the same prefix chain (add lowest set bit).
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        # Previous index in the prefix chain (clear lowest set bit).
        return index - (index & (-index))

    def add(self, index, value):
        """Add ``value`` to the element at ``index``."""
        if index == 0:
            # Index 0 lives outside the BIT structure.
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        """Set the element at ``index`` to ``value``."""
        self.add(index, value - self.get(index))

    def prefix(self, right):
        """Return the sum of elements in ``[0, right)``."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        """Return the sum of elements in ``[left, right)``."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        """Return the element at ``index``."""
        return self.query(index, index + 1)

    def rank_query(self, value):
        """Return the largest index whose prefix sum is <= ``value`` (-1 if none)."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 48
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__(sorted_collection, item):
    """Search ``item`` in an ascending ``sorted_collection`` by interpolation.

    Returns the index of ``item`` or ``None`` if absent.
    Fix: the obfuscated source duplicated the parameter name (a SyntaxError) and
    bound every local (`left`, `right`, `point`, `current_item`) to throwaway
    names while the logic read the real ones.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        # Linear interpolation between the boundary values.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        if point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        else:
            if item < current_item:
                right = point - 1
            else:
                left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive interpolation search of ``item`` in ``sorted_collection[left..right]``.

    Returns the index of ``item`` or ``None`` if absent.
    Fix: the obfuscated def name did not match the name used by its own
    recursive calls (``interpolation_search_by_recursion``) and all four
    parameters shared one name (a SyntaxError); both are restored from the
    call sites in the body.
    """
    if sorted_collection[left] == sorted_collection[right]:
        # Constant sub-range: either every slot matches or none does.
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def SCREAMING_SNAKE_CASE__(collection):
    """Return True if ``collection`` is ascending-sorted, else raise ValueError.

    Fix: the parameter was obfuscated, leaving the comparison's left-hand
    ``collection`` unbound.
    """
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
    import sys

    # NOTE(review): obfuscated source — the `a_ = ...` bindings below are read back
    # as `debug`, `collection`, `target` and `result`, and the functions are called
    # by their original (non-garbled) names; confirm the intended bindings.
    # Also note `collection` is only assigned inside `if debug == 1`.
    a_ = 0
    if debug == 1:
        a_ = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
    a_ = 67
    a_ = interpolation_search(collection, target)
    if result is not None:
        print(F'''{target} found at positions: {result}''')
    else:
        print('''Not found''')
| 48
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__(collection: list) -> list:
    """Sort ``collection`` in place with binary insertion sort and return it.

    For each element, binary-search its insertion point among the already
    sorted prefix, shift the tail right, and drop it in. Stable; O(n^2) moves
    but only O(n log n) comparisons.
    Fix: the obfuscated source bound every local (`n`, `val`, `low`, `high`,
    `mid`) to throwaway names while the logic read the real ones, and the
    shifted/inserted slots were never actually assigned.
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search for the leftmost position after equal elements.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift the sorted tail one slot right and insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    # NOTE(review): obfuscated source — the two `a_` bindings are consumed as
    # `user_input` and `unsorted`, and `binary_insertion_sort` refers to the
    # sorter defined above under a garbled name; confirm the intended bindings.
    a_ = input('''Enter numbers separated by a comma:\n''').strip()
    a_ = [int(item) for item in user_input.split(''',''')]
    print(binary_insertion_sort(unsorted))
| 48
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
    """simple docstring"""
    # Image processor pipeline: resize (optionally via a crop percentage),
    # center-crop, rescale and normalize images into a "pixel_values" batch.
    # NOTE(review): obfuscated source — every parameter of every method shares
    # the name `lowercase__` (a SyntaxError as written) and all attribute
    # assignments bind `snake_case_` instead of `self.<attr>`; the intended
    # names are visible from the right-hand sides. Left byte-identical here.
    _A : int = ["""pixel_values"""]
    def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
        super().__init__(**lowercase__ )
        # Defaults: 224px shortest edge for resize, 224x224 center crop,
        # ImageNet mean/std for normalization.
        snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24}
        snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
        snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" )
        snake_case_ : Union[str, Any] = do_resize
        snake_case_ : List[str] = size
        snake_case_ : str = crop_pct
        snake_case_ : str = resample
        snake_case_ : Optional[Any] = do_center_crop
        snake_case_ : Dict = crop_size
        snake_case_ : int = do_rescale
        snake_case_ : Optional[int] = rescale_factor
        snake_case_ : str = do_normalize
        snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    # Resize one image; when crop_pct is given, resize to size/crop_pct first so a
    # later center-crop of `size` keeps roughly crop_pct of the image.
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
        snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
        if crop_pct is not None:
            if "shortest_edge" in size:
                snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct )
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    snake_case_ : Dict = int(size["""height"""] / crop_pct )
                else:
                    snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
            else:
                raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
            snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ )
        else:
            if "shortest_edge" in size:
                snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ )
            elif "height" in size and "width" in size:
                snake_case_ : int = (size["""height"""], size["""width"""])
            else:
                raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
        return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
    # Center-crop one image to size["height"] x size["width"].
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        snake_case_ : int = get_size_dict(lowercase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
        return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ )
    # Rescale pixel values by a scalar factor (e.g. 1/255).
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
    # Normalize with per-channel mean/std.
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
    # Full preprocessing entry point: validates inputs, then applies
    # resize -> center-crop -> rescale -> normalize per the flags, and packs the
    # result into a BatchFeature under "pixel_values".
    def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
        snake_case_ : str = do_resize if do_resize is not None else self.do_resize
        snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct
        snake_case_ : List[Any] = resample if resample is not None else self.resample
        snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize
        snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean
        snake_case_ : int = image_std if image_std is not None else self.image_std
        snake_case_ : List[Any] = size if size is not None else self.size
        snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size
        snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" )
        snake_case_ : List[str] = make_list_of_images(lowercase__ )
        if not valid_images(lowercase__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_center_crop and crop_pct is None:
            raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images]
        if do_resize:
            snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images]
        if do_center_crop:
            snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
        if do_rescale:
            snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
        if do_normalize:
            snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
        snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
        snake_case_ : Dict = {"""pixel_values""": images}
        return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
| 48
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( _UpperCAmelCase):
    """simple docstring"""
    # Processor that pairs an image processor with a BERT tokenizer: text goes
    # through the tokenizer, images through the image processor, and combined
    # calls merge both outputs into one encoding.
    # NOTE(review): obfuscated source — method parameters share the name
    # `lowercase__` and locals bind `snake_case_` while the bodies read the
    # intended names (`image_processor`, `encoding`, ...). Left byte-identical.
    _A : Union[str, Any] = ["""image_processor""", """tokenizer"""]
    _A : str = """ChineseCLIPImageProcessor"""
    _A : Tuple = ("""BertTokenizer""", """BertTokenizerFast""")
    def __init__(self , lowercase__=None , lowercase__=None , **lowercase__ ):
        snake_case_ : Any = None
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, with a deprecation warning.
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , lowercase__ , )
            snake_case_ : Optional[Any] = kwargs.pop("""feature_extractor""" )
        snake_case_ : str = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(lowercase__ , lowercase__ )
        snake_case_ : Union[str, Any] = self.image_processor
    def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ):
        # Tokenize text and/or process images; when both are given, attach the
        # pixel values to the text encoding.
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            snake_case_ : Any = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
        if images is not None:
            snake_case_ : Tuple = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
        if text is not None and images is not None:
            snake_case_ : List[Any] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ )
    # Delegate batch decoding to the tokenizer.
    def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
        return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
    # Delegate decoding to the tokenizer.
    def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
        return self.tokenizer.decode(*lowercase__ , **lowercase__ )
    @property
    def __UpperCamelCase (self ):
        # Union of tokenizer and image-processor input names, order-preserving.
        snake_case_ : Optional[int] = self.tokenizer.model_input_names
        snake_case_ : Union[str, Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def __UpperCamelCase (self ):
        # Deprecated alias for `image_processor_class`.
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase__ , )
        return self.image_processor_class
| 48
| 1
|
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the tiny SentencePiece model checked into the test fixtures.
# (The obfuscated version bound all three constants to the same name `a_`,
# while the tests below reference SAMPLE_SP / FR_CODE / ES_CODE.)
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp

# Language-code token ids used by the multilingual tests below.
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechaTextTokenizerTest(_UpperCAmelCase, unittest.TestCase):
    """Unit tests for SpeechaTextTokenizer backed by a small SentencePiece fixture.

    Fixes: every method was named `__UpperCamelCase`, so later definitions
    silently overwrote earlier ones and only a single test could run; the
    class also shared the name `__lowercase` with the multilingual test class
    below, hiding it from unittest discovery.
    """

    # Attributes consumed by TokenizerTesterMixin.
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        """Build a vocab from the fixture SentencePiece model and save a
        ready-to-load tokenizer into the mixin's temp dir."""
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_001)

    def test_full_tokenizer(self):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        # Ids outside the fixture vocab round-trip back to "<unk>".
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechaTextTokenizerMultilinguialTest(unittest.TestCase):
    """Integration tests against the multilingual MuST-C checkpoint.

    Fixes: all five tests were named `__UpperCamelCase` (only the last one
    survived), and the obfuscation had dropped the `self.tokenizer.tgt_lang`
    assignment targets, leaving dead statements.
    """

    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        # Download the tokenizer once for the whole test class.
        cls.tokenizer: SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10_000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        # Sequences are wrapped as <lang_code> ... <eos>.
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 48
|
"""simple docstring"""
import argparse
import copy
def generate_neighbours(path):
    """Parse a whitespace-separated edge-list file into an adjacency mapping.

    Each data line is ``u v weight``.  The graph is undirected, so the edge is
    recorded under both endpoints.

    Args:
        path: path to the edge-list file.

    Returns:
        dict mapping each node name to a list of ``[neighbour, weight]``
        pairs; weights stay strings because downstream code applies int().
    """
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            parts = line.split()
            if len(parts) < 3:
                # Tolerate blank/short lines (e.g. a trailing newline) instead
                # of raising IndexError like the original did.
                continue
            node_a, node_b, weight = parts[0], parts[1], parts[2]
            dict_of_neighbours.setdefault(node_a, []).append([node_b, weight])
            dict_of_neighbours.setdefault(node_b, []).append([node_a, weight])

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour construction of an initial closed tour.

    The tour starts (and ends) at the first node named in the file at `path`.

    Args:
        path: edge-list file; only the first token of the first line is read.
        dict_of_neighbours: adjacency mapping produced by generate_neighbours.

    Returns:
        (first_solution, distance_of_first_solution): the tour as a list of
        node names (first == last) and its total length as an int.
    """
    with open(path) as f:
        # First token of the first line, so multi-character node names work;
        # the original read a single character and broke on names like "n10".
        start_node = f.readline().split()[0]
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0

    while visiting not in first_solution:
        # Sentinel larger than any edge weight; the surplus added on the last
        # (dead-end) iteration is subtracted again below.
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    # Close the tour by returning to the start node.
    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    # Replace the sentinel added on the final iteration with the real weight
    # of the closing edge.
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate the 2-swap neighbourhood of a closed tour.

    Every ordered pair of distinct interior nodes is exchanged; each resulting
    tour gets its total length appended as its final element.  Duplicates are
    dropped and the neighbourhood is returned sorted by length (ascending).

    Fix: the sort key lambda previously referenced an undefined name `x` as
    its body's variable while binding an unrelated parameter, raising
    NameError at runtime.
    """
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            # Copy the tour and swap the two interior nodes.
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            # Total length of the swapped tour: sum the weight of each
            # consecutive edge (missing edges contribute 0, as before).
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations starting from `first_solution`.

    Fix: the original declared five parameters all named
    ``SCREAMING_SNAKE_CASE__`` (a SyntaxError) and called an undefined name;
    parameter names are restored to match the call sites.

    Args:
        first_solution: initial closed tour (list of node names, first == last).
        distance_of_first_solution: total length of that tour.
        dict_of_neighbours: adjacency mapping node -> [[neighbour, weight], ...].
        iters: number of iterations to perform.
        size: maximum length of the tabu list.

    Returns:
        (best_solution_ever, best_cost): shortest tour seen and its length.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            # Locate the first position where the candidate differs from the
            # current solution; that pair of nodes identifies the move.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                # Strip the trailing cost element stored by find_neighborhood.
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Move is tabu: fall back to the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        # Bound the tabu list: evict the oldest move.
        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    """Entry point: build the graph from `args.File`, greedily construct an
    initial tour, run tabu search, and print the best tour found."""
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f'Best solution: {best_sol}, with total distance: {best_cost}.')
if __name__ == "__main__":
    # Fix: the parser was previously bound to a throwaway name while the
    # add_argument calls below referenced `parser`, raising NameError.
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
| 48
| 1
|
"""simple docstring"""
# Directed acyclic graph for the demo below: node -> list of successors.
# Fix: both constants were bound to the same name `a_`, while the traversal
# below references `edges` and `vertices`.
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
# Every vertex of the graph, used to restart the traversal from unvisited nodes.
vertices = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort, graph=None, vertex_list=None):
    """Depth-first post-order traversal of a DAG.

    Each node is appended to `sort` after all of its successors, and the
    traversal restarts from any still-unvisited vertex, so `sort` ends up
    containing every vertex with each one appearing after everything it
    points to (i.e. reverse topological order).

    Args:
        start: node to begin from.
        visited: accumulator of visited nodes (pass [] initially); mutated.
        sort: accumulator for the ordering (pass [] initially); mutated.
        graph: adjacency mapping; defaults to the module-level `edges`
            (new optional parameter — the original hard-coded the global).
        vertex_list: all vertices; defaults to the module-level `vertices`.

    Returns:
        The `sort` list.
    """
    if graph is None:
        graph = edges
    if vertex_list is None:
        vertex_list = vertices

    current = start
    # add current to visited
    visited.append(current)

    # Visit every unvisited successor first (post-order).
    for neighbor in graph[current]:
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort, graph, vertex_list)
    # if all neighbors visited add current to sort
    sort.append(current)

    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertex_list):
        for vertice in vertex_list:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort, graph, vertex_list)

    return sort
if __name__ == "__main__":
    # Fix: the result was bound to a throwaway name while `print` referenced
    # an undefined `sort`.
    result = topological_sort("a", [], [])
    print(result)
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
# Class docstring injected into RagConfig via `add_start_docstrings` below.
# Fix: the constant was bound to a throwaway name `a_`.
RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
            [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`)
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`)
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*)
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`)
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved(`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class __lowercase ( _UpperCAmelCase):
    """Composite configuration holding a question-encoder config and a
    generator config for RAG models.

    Fixes: the 27 constructor parameters were all declared as `lowercase__`
    (a SyntaxError) and the class attributes both as `_A`; names are restored
    from the keyword arguments forwarded to `super().__init__` and the
    attribute assignments.
    """

    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"

        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            # Fall back to the generator's setting when not given explicitly.
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        """Build a composite config from the two sub-configs."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 48
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class __lowercase ( _UpperCAmelCase):
    """Task template describing a text-summarization dataset.

    Fixes: every field was named `_A` (self-overwriting) and `frozen` was
    bound to an undefined name; fields are restored to the names the
    TaskTemplate machinery reads.
    """

    # Task name; serialized even when left at its default value.
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self):
        """Map dataset column names to the canonical task fields."""
        return {self.text_column: "text", self.summary_column: "summary"}
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
# Module logger (transformers' logging wrapper); referenced by the config below.
# Fix: was bound to `a_` while the class body references `logger`.
logger = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
    """Configuration for a UPerNet semantic-segmentation model.

    Fixes: the constructor declared every parameter as `lowercase__`
    (a SyntaxError); names are restored from the attribute assignments.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # matches upstream default; never mutated
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Rebuild the proper config class from a plain serialized dict.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 48
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    """Holds the knobs used to build a MobileViTImageProcessor in the tests.

    Fixes: the constructor declared every parameter as `lowercase__`
    (a SyntaxError), and the class name is restored to the one the test
    class's setUp (which calls MobileViTImageProcessingTester) expects.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """Return kwargs suitable for MobileViTImageProcessor(**kwargs)."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
    """Tests for MobileViTImageProcessor over PIL, numpy, and torch inputs.

    Fixes: all methods were named `__UpperCamelCase` (self-overwriting, so
    only one test could run); the three near-identical call tests are
    decomposed into a shared `_check_call` helper.
    """

    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        # Scalar overrides are expanded into the dict forms.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def _check_call(self, image_inputs, expected_type):
        """Shared body for the PIL/numpy/torch call tests: checks input types
        and the output pixel_values shapes for single and batched inputs."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        for image in image_inputs:
            self.assertIsInstance(image, expected_type)

        # Not batched: a single image becomes a batch of one.
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Batched.
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pil(self):
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        self._check_call(image_inputs, Image.Image)

    def test_call_numpy(self):
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        self._check_call(image_inputs, np.ndarray)

    def test_call_pytorch(self):
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        self._check_call(image_inputs, torch.Tensor)
| 48
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
a_ = logging.getLogger(__name__)
class __lowercase ( _UpperCAmelCase):
    """CoNLL-style NER task: token-per-line files, blank line between sentences, label in a fixed column."""
    def __init__(self , lowercase__=-1 ):
        # in NER datasets, the last column is usually reserved for NER label
        # NOTE(review): assignment reads `label_idx`, which does not match the declared parameter —
        # looks like a renaming artifact; verify the intended binding.
        snake_case_ : Union[str, Any] = label_idx
    def __UpperCamelCase (self , lowercase__ , lowercase__ ):
        """Read `{mode}.txt` under the data dir and return one InputExample per sentence."""
        if isinstance(lowercase__ , lowercase__ ):
            snake_case_ : List[str] = mode.value
        snake_case_ : List[Any] = os.path.join(lowercase__ , f'{mode}.txt' )
        snake_case_ : Tuple = 1
        snake_case_ : Any = []
        with open(lowercase__ , encoding="""utf-8""" ) as f:
            snake_case_ : str = []
            snake_case_ : List[Any] = []
            for line in f:
                # A document marker or a blank line terminates the current sentence.
                if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
                        guid_index += 1
                        snake_case_ : Optional[Any] = []
                        snake_case_ : int = []
                else:
                    snake_case_ : Optional[Any] = line.split(""" """ )
                    words.append(splits[0] )
                    if len(lowercase__ ) > 1:
                        labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("""O""" )
            # Flush the trailing sentence when the file does not end with a blank line.
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
        return examples
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
        """Write per-token predictions next to the original tokens of the test input."""
        snake_case_ : str = 0
        for line in test_input_reader:
            if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
                writer.write(lowercase__ )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                snake_case_ : Optional[int] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
                writer.write(lowercase__ )
            else:
                # Tokens truncated away during encoding have no prediction to emit.
                logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
    def __UpperCamelCase (self , lowercase__ ):
        """Return label names from `path` (guaranteeing "O" is present) or the default CoNLL-2003 NER set."""
        if path:
            with open(lowercase__ , """r""" ) as f:
                snake_case_ : Dict = f.read().splitlines()
            if "O" not in labels:
                snake_case_ : List[Any] = ["""O"""] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowercase ( _UpperCAmelCase):
    """Chunking variant of the token-classification task: labels live in the second-to-last column."""

    def __init__(self ):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )

    def __UpperCamelCase (self , lowercase__ ):
        """Return chunk labels read from `path` (ensuring "O" is present) or the default CoNLL-2000 tag set."""
        if path:
            with open(lowercase__ , """r""" ) as f:
                chunk_labels = f.read().splitlines()
            if "O" not in chunk_labels:
                chunk_labels = ["""O"""] + chunk_labels
            return chunk_labels
        # Default set: "O" followed by all B- tags, then all I- tags, in the canonical order.
        phrase_tags = [
            """ADVP""",
            """INTJ""",
            """LST""",
            """PRT""",
            """NP""",
            """SBAR""",
            """VP""",
            """ADJP""",
            """CONJP""",
            """PP""",
        ]
        return ["O"] + [f'B-{tag}' for tag in phrase_tags] + [f'I-{tag}' for tag in phrase_tags]
class __lowercase ( _UpperCAmelCase):
    """POS-tagging task backed by CoNLL-U files parsed with `conllu.parse_incr`."""
    def __UpperCamelCase (self , lowercase__ , lowercase__ ):
        """Read `{mode}.txt` as CoNLL-U and return InputExamples labelled with UPOS tags."""
        if isinstance(lowercase__ , lowercase__ ):
            snake_case_ : List[Any] = mode.value
        snake_case_ : Optional[int] = os.path.join(lowercase__ , f'{mode}.txt' )
        snake_case_ : Tuple = 1
        snake_case_ : str = []
        with open(lowercase__ , encoding="""utf-8""" ) as f:
            for sentence in parse_incr(lowercase__ ):
                snake_case_ : Tuple = []
                snake_case_ : Any = []
                for token in sentence:
                    words.append(token["""form"""] )
                    labels.append(token["""upos"""] )
                # Downstream code requires exactly one label per surface form.
                assert len(lowercase__ ) == len(lowercase__ )
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
                    guid_index += 1
        return examples
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
        """Render each sentence as `form (upos|prediction)` tokens followed by a newline."""
        snake_case_ : Dict = 0
        for sentence in parse_incr(lowercase__ ):
            snake_case_ : int = preds_list[example_id]
            snake_case_ : Dict = """"""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(lowercase__ )
            example_id += 1
    def __UpperCamelCase (self , lowercase__ ):
        """Return labels from `path`, or the 17-tag Universal POS tag set."""
        if path:
            with open(lowercase__ , """r""" ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 48
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def SCREAMING_SNAKE_CASE__ ( ):
    """Entry point for `transformers-cli`: register every subcommand, parse args, dispatch."""
    snake_case_ : Any = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    snake_case_ : str = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands (same order as before; registration order affects help output)
    for command_class in (
        ConvertCommand,
        DownloadCommand,
        EnvironmentCommand,
        RunCommand,
        ServeCommand,
        UserCommands,
        AddNewModelCommand,
        AddNewModelLikeCommand,
        LfsCommands,
        PTtoTFCommand,
    ):
        command_class.register_subcommand(SCREAMING_SNAKE_CASE__ )
    # Let's go
    snake_case_ : Union[str, Any] = parser.parse_args()
    if not hasattr(SCREAMING_SNAKE_CASE__ , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    snake_case_ : Tuple = args.func(SCREAMING_SNAKE_CASE__ )
    service.run()
if __name__ == "__main__":
    # Script entry point: run the CLI when executed directly.
    main()
| 48
|
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
    """Miller-Rabin probabilistic primality test with 5 random witness rounds.

    Returns True when the argument is (almost certainly) prime, False when it
    is certainly composite.  Fixes over the previous version:
    - local names (`num`, `s`, `t`, `a`, `v`, `i`) are now bound consistently,
      so the body no longer raises NameError at runtime;
    - explicit guards for num < 2, num in (2, 3) and even numbers — the
      witness draw `randrange(2, num - 1)` would raise for num <= 3 and the
      s * 2**t decomposition assumes an odd candidate.
    """
    num = SCREAMING_SNAKE_CASE__
    if num < 2:
        return False
    if num in (2, 3):
        return True
    if num % 2 == 0:
        return False
    # Write num - 1 as s * 2**t with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    # Squarings exhausted without reaching num - 1: certainly composite.
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
    """Fast primality check: trial-divide by all primes below 1000, then fall
    back to the Miller-Rabin test for larger candidates.

    Fixes over the previous version:
    - the parameter is bound to a consistent local name (`num`), removing the
      runtime NameError left by inconsistent renaming;
    - the 168-entry hard-coded prime literal is replaced by an equivalent
      sieve of Eratosthenes over [2, 1000), producing exactly the same set.
    """
    num = SCREAMING_SNAKE_CASE__
    if num < 2:
        return False
    # Sieve of Eratosthenes for all primes below 1000 (same set as the old literal list).
    sieve = [True] * 1000
    sieve[0] = sieve[1] = False
    for p in range(2 , 32 ):  # 31 = isqrt(999); larger factors are covered by smaller ones
        if sieve[p]:
            for multiple in range(p * p , 1000 , p ):
                sieve[multiple] = False
    low_primes = [p for p in range(1000 ) if sieve[p]]
    if num in low_primes:
        return True
    # Any small prime factor proves compositeness without the expensive test.
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 ):
    """Draw random keysize-bit candidates until one passes the primality check."""
    while True:
        # NOTE(review): `keysize` and `num` do not match the declared parameter — looks like a
        # renaming artifact; presumably the draw should use the parameter and return the candidate.
        snake_case_ : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(SCREAMING_SNAKE_CASE__ ):
            return num
if __name__ == "__main__":
    # Demo: generate one large prime and report the low-level primality check on it.
    a_ = generate_large_prime()
    print(('''Prime number:''', num))
    print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 48
| 1
|
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] ):  # picklable for multiprocessing
    """Module-level (hence picklable) helper: reduce an array-like to its sum."""
    # NOTE(review): body reads `x` rather than the declared parameter — renaming artifact?
    return x.sum()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):  # picklable for multiprocessing
    """Module-level (hence picklable) helper: increment by one."""
    # NOTE(review): body reads `i` rather than the declared parameter — renaming artifact?
    return i + 1
@dataclass
class __lowercase :
    """Simple two-field record used by the asdict tests below."""
    # NOTE(review): both annotations use the same name `_A`, so the dataclass ends up with a
    # single str field; usage below (A(x=..., y=...)) suggests two distinct fields were intended.
    _A : int
    _A : str
class __lowercase ( _UpperCAmelCase):
    """Tests for map_nested, zip_dict and temporary_assignment from datasets.utils.py_utils."""
    def __UpperCamelCase (self ):
        """map_nested should traverse scalars, lists and (nested) dicts, serially and with num_proc."""
        # Inputs of increasing structural depth, paired with expected outputs below.
        snake_case_ : Tuple = {}
        snake_case_ : Any = []
        snake_case_ : Any = 1
        snake_case_ : List[str] = [1, 2]
        snake_case_ : Dict = {"""a""": 1, """b""": 2}
        snake_case_ : Tuple = {"""a""": [1, 2], """b""": [3, 4]}
        snake_case_ : Optional[Any] = {"""a""": {"""1""": 1}, """b""": 2}
        snake_case_ : List[str] = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
        snake_case_ : List[str] = {}
        snake_case_ : List[Any] = []
        snake_case_ : Optional[int] = 2
        snake_case_ : List[Any] = [2, 3]
        snake_case_ : List[str] = {"""a""": 2, """b""": 3}
        snake_case_ : Tuple = {"""a""": [2, 3], """b""": [4, 5]}
        snake_case_ : Optional[Any] = {"""a""": {"""1""": 2}, """b""": 3}
        snake_case_ : Optional[int] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
        self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
        self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
        self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
        self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
        self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
        self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
        self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
        self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
        # Same expectations with a multiprocessing pool.
        snake_case_ : Optional[Any] = 2
        self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
        self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
        self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
        self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
        self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
        self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
        self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
        self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
        # NumPy leaves are only mapped when map_numpy is set.
        snake_case_ : List[str] = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
        snake_case_ : List[str] = {"""a""": 2, """b""": 0, """c""": 2}
        snake_case_ : Any = {
            """a""": np.eye(2 ).astype(lowercase__ ),
            """b""": np.zeros(3 ).astype(lowercase__ ),
            """c""": np.ones(2 ).astype(lowercase__ ),
        }
        self.assertEqual(map_nested(lowercase__ , lowercase__ , map_numpy=lowercase__ ) , lowercase__ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(lowercase__ , lowercase__ , map_numpy=lowercase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(lowercase__ , lowercase__ , map_numpy=lowercase__ , num_proc=lowercase__ ) , lowercase__ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(lowercase__ , lowercase__ , map_numpy=lowercase__ , num_proc=lowercase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(lowercase__ ):  # can't pickle a local lambda
            map_nested(lambda lowercase__ : x + 1 , lowercase__ , num_proc=lowercase__ )
    def __UpperCamelCase (self ):
        """zip_dict should pair values across dicts that share the same keys."""
        snake_case_ : List[str] = {"""a""": 1, """b""": 2}
        snake_case_ : Dict = {"""a""": 3, """b""": 4}
        snake_case_ : Dict = {"""a""": 5, """b""": 6}
        snake_case_ : List[Any] = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(lowercase__ , lowercase__ , lowercase__ ) ) , lowercase__ )
    def __UpperCamelCase (self ):
        """temporary_assignment should patch an attribute inside the context and restore it on exit."""
        class __lowercase :
            """Minimal object carrying a single class attribute to patch."""
            _A : str = """bar"""
        snake_case_ : Dict = Foo()
        self.assertEqual(foo.my_attr , """bar""" )
        with temporary_assignment(lowercase__ , """my_attr""" , """BAR""" ):
            self.assertEqual(foo.my_attr , """BAR""" )
        self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
    """iterable_length, num_proc, expected_num_proc""" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (1_6, 1_6, 1_6),
        (1_6, 1_7, 1_6),
        (1_7, 1_6, 1_6),
    ] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
    """map_nested should only spawn a multiprocessing pool above the parallel_min_length threshold."""
    # Patch both the serial worker and the pool so no real work runs; only dispatch is asserted.
    with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
        """datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
        snake_case_ : Optional[Any] = {f'{i}': i for i in range(SCREAMING_SNAKE_CASE__ )}
        snake_case_ : Union[str, Any] = map_nested(lambda SCREAMING_SNAKE_CASE__ : x + 1_0 , SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ , parallel_min_length=1_6 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __lowercase ( _UpperCAmelCase):
    """temp_seed should make TF / PyTorch / NumPy RNG reproducible inside the context only."""
    @require_tf
    def __UpperCamelCase (self ):
        """Same seed twice -> identical TF outputs; unseeded call afterwards -> different output."""
        import tensorflow as tf
        from tensorflow.keras import layers
        snake_case_ : Tuple = layers.Dense(2 )
        def gen_random_output():
            # Fresh random input through the dense layer on every call.
            snake_case_ : List[Any] = tf.random.uniform((1, 3) )
            return model(lowercase__ ).numpy()
        with temp_seed(42 , set_tensorflow=lowercase__ ):
            snake_case_ : Tuple = gen_random_output()
        with temp_seed(42 , set_tensorflow=lowercase__ ):
            snake_case_ : str = gen_random_output()
        snake_case_ : Optional[int] = gen_random_output()
        np.testing.assert_equal(lowercase__ , lowercase__ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    @require_torch
    def __UpperCamelCase (self ):
        """Same seed twice -> identical torch outputs; unseeded call afterwards -> different output."""
        import torch
        def gen_random_output():
            # Both the layer init and its input draw from the torch RNG.
            snake_case_ : Any = torch.nn.Linear(3 , 2 )
            snake_case_ : Optional[int] = torch.rand(1 , 3 )
            return model(lowercase__ ).detach().numpy()
        with temp_seed(42 , set_pytorch=lowercase__ ):
            snake_case_ : Optional[Any] = gen_random_output()
        with temp_seed(42 , set_pytorch=lowercase__ ):
            snake_case_ : Optional[Any] = gen_random_output()
        snake_case_ : Tuple = gen_random_output()
        np.testing.assert_equal(lowercase__ , lowercase__ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    def __UpperCamelCase (self ):
        """Same seed twice -> identical NumPy outputs; unseeded call afterwards -> different output."""
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            snake_case_ : List[Any] = gen_random_output()
        with temp_seed(42 ):
            snake_case_ : Union[str, Any] = gen_random_output()
        snake_case_ : Optional[Any] = gen_random_output()
        np.testing.assert_equal(lowercase__ , lowercase__ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
    """NestedDataStructure should expose the wrapped object unchanged via `.data`."""
    snake_case_ : Union[str, Any] = NestedDataStructure(SCREAMING_SNAKE_CASE__ ).data
    assert output_data == input_data
@pytest.mark.parametrize(
    """data, expected_output""" , [
        ({}, []),
        ([], []),
        ("""foo""", ["""foo"""]),
        (["""foo""", """bar"""], ["""foo""", """bar"""]),
        ([["""foo""", """bar"""]], ["""foo""", """bar"""]),
        ([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
        ([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
        ({"""a""": 1, """b""": 2}, [1, 2]),
        ({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
        ({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
        ({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
        ({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
        ({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
        ({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
        ({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
        ({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
        ({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
    ] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ):
    """NestedDataStructure.flatten should yield all leaves in order for arbitrary nesting."""
    snake_case_ : Dict = NestedDataStructure(SCREAMING_SNAKE_CASE__ ).flatten()
    assert output == expected_output
def SCREAMING_SNAKE_CASE__ ( ):
    """asdict should recurse into dataclasses nested inside dicts/lists and reject non-dataclass roots."""
    snake_case_ : Union[str, Any] = A(x=1 , y="""foobar""" )
    snake_case_ : List[Any] = {"""x""": 1, """y""": """foobar"""}
    assert asdict(SCREAMING_SNAKE_CASE__ ) == expected_output
    snake_case_ : Tuple = {"""a""": {"""b""": A(x=1_0 , y="""foo""" )}, """c""": [A(x=2_0 , y="""bar""" )]}
    snake_case_ : Union[str, Any] = {"""a""": {"""b""": {"""x""": 1_0, """y""": """foo"""}}, """c""": [{"""x""": 2_0, """y""": """bar"""}]}
    assert asdict(SCREAMING_SNAKE_CASE__ ) == expected_output
    with pytest.raises(SCREAMING_SNAKE_CASE__ ):
        # A plain list root is not a dataclass instance, so asdict must raise.
        asdict([1, A(x=1_0 , y="""foo""" )] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
    """Whitespace-tokenize; kept at module scope so the pool tests below can pickle it."""
    # NOTE(review): body reads `text` rather than the declared parameter — renaming artifact?
    return text.split()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict ):
    """Yield (timestamp, content) twice, two seconds apart, to measure streaming latency."""
    # NOTE(review): body reads `content` rather than the declared parameter — renaming artifact?
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def SCREAMING_SNAKE_CASE__ ( ):
    """iflatmap_unordered should flatten results from both Pool flavors and stream items eagerly."""
    with Pool(2 ) as pool:
        snake_case_ : Optional[int] = list(iflatmap_unordered(SCREAMING_SNAKE_CASE__ , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 1_0 ) )
        assert out.count("""hello""" ) == 1_0
        assert out.count("""there""" ) == 1_0
        assert len(SCREAMING_SNAKE_CASE__ ) == 2_0
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        snake_case_ : Union[str, Any] = list(iflatmap_unordered(SCREAMING_SNAKE_CASE__ , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 1_0 ) )
        assert out.count("""hello""" ) == 1_0
        assert out.count("""there""" ) == 1_0
        assert len(SCREAMING_SNAKE_CASE__ ) == 2_0
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        snake_case_ : Optional[int] = []
        for yield_time, content in iflatmap_unordered(
            SCREAMING_SNAKE_CASE__ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
            # Each item should arrive immediately after its producer yielded it, not after the pool drains.
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(SCREAMING_SNAKE_CASE__ )
        assert out.count("""a""" ) == 2
        assert out.count("""b""" ) == 2
        assert len(SCREAMING_SNAKE_CASE__ ) == 4
| 48
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
    """Configuration class for DeBERTa-v2 models (model_type "deberta-v2")."""
    _A : Dict = """deberta-v2"""
    def __init__(self , lowercase__=12_81_00 , lowercase__=15_36 , lowercase__=24 , lowercase__=24 , lowercase__=61_44 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=0 , lowercase__=0.02 , lowercase__=1e-7 , lowercase__=False , lowercase__=-1 , lowercase__=0 , lowercase__=True , lowercase__=None , lowercase__=0 , lowercase__="gelu" , **lowercase__ , ):
        """Store model hyperparameters (sizes, dropout, relative-attention and pooler settings).

        NOTE(review): the assigned names (hidden_size, num_hidden_layers, ...) do not match the
        mangled parameter names — they presumably correspond positionally to the signature; verify.
        """
        super().__init__(**lowercase__ )
        snake_case_ : Union[str, Any] = hidden_size
        snake_case_ : str = num_hidden_layers
        snake_case_ : Tuple = num_attention_heads
        snake_case_ : Dict = intermediate_size
        snake_case_ : Optional[int] = hidden_act
        snake_case_ : Union[str, Any] = hidden_dropout_prob
        snake_case_ : Any = attention_probs_dropout_prob
        snake_case_ : List[Any] = max_position_embeddings
        snake_case_ : Union[str, Any] = type_vocab_size
        snake_case_ : Union[str, Any] = initializer_range
        snake_case_ : List[Any] = relative_attention
        snake_case_ : Dict = max_relative_positions
        snake_case_ : Optional[int] = pad_token_id
        snake_case_ : List[str] = position_biased_input
        # Backwards compatibility
        if type(lowercase__ ) == str:
            # Older checkpoints stored pos_att_type as a "|"-separated string.
            snake_case_ : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split("""|""" )]
        snake_case_ : Optional[int] = pos_att_type
        snake_case_ : List[str] = vocab_size
        snake_case_ : Tuple = layer_norm_eps
        snake_case_ : List[Any] = kwargs.get("""pooler_hidden_size""" , lowercase__ )
        snake_case_ : List[str] = pooler_dropout
        snake_case_ : int = pooler_hidden_act
class __lowercase ( _UpperCAmelCase):
    """ONNX export configuration for DeBERTa-v2."""
    @property
    def __UpperCamelCase (self ):
        """Dynamic-axis input spec; token_type_ids are exported only when the model uses token types."""
        if self.task == "multiple-choice":
            snake_case_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            snake_case_ : int = {0: """batch""", 1: """sequence"""}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
        else:
            return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
    @property
    def __UpperCamelCase (self ):
        # Minimum ONNX opset supported by this export.
        return 12
    def __UpperCamelCase (self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , lowercase__ = 3 , lowercase__ = 40 , lowercase__ = 40 , lowercase__ = None , ):
        """Build dummy inputs, dropping token_type_ids when the config declares no token types."""
        snake_case_ : str = super().generate_dummy_inputs(preprocessor=lowercase__ , framework=lowercase__ )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 48
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
    """Fast CPU tests for StableDiffusionPanoramaPipeline built from tiny dummy components."""
    _A : str = StableDiffusionPanoramaPipeline
    _A : Optional[Any] = TEXT_TO_IMAGE_PARAMS
    _A : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
    _A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
    _A : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    def __UpperCamelCase (self ):
        """Build tiny seeded UNet/VAE/CLIP components so the pipeline runs quickly on CPU."""
        torch.manual_seed(0 )
        snake_case_ : int = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        snake_case_ : str = DDIMScheduler()
        torch.manual_seed(0 )
        snake_case_ : List[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        snake_case_ : int = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        snake_case_ : Union[str, Any] = CLIPTextModel(lowercase__ )
        snake_case_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        snake_case_ : List[str] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def __UpperCamelCase (self , lowercase__ , lowercase__=0 ):
        """Deterministic single-step pipeline inputs."""
        snake_case_ : Any = torch.manual_seed(lowercase__ )
        snake_case_ : List[str] = {
            """prompt""": """a photo of the dolomites""",
            """generator""": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            """height""": None,
            """width""": None,
            """num_inference_steps""": 1,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def __UpperCamelCase (self ):
        """Default DDIM run should produce the reference image slice."""
        snake_case_ : Union[str, Any] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : List[str] = self.get_dummy_components()
        snake_case_ : Any = StableDiffusionPanoramaPipeline(**lowercase__ )
        snake_case_ : List[str] = sd_pipe.to(lowercase__ )
        sd_pipe.set_progress_bar_config(disable=lowercase__ )
        snake_case_ : str = self.get_dummy_inputs(lowercase__ )
        snake_case_ : str = sd_pipe(**lowercase__ ).images
        snake_case_ : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case_ : str = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def __UpperCamelCase (self ):
        """Batch-consistency check restricted to small batch sizes for speed."""
        super().test_inference_batch_consistent(batch_sizes=[1, 2] )
    def __UpperCamelCase (self ):
        """Single-vs-batched identity check with a relaxed tolerance."""
        super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
    def __UpperCamelCase (self ):
        """A negative prompt should still yield the (slightly shifted) reference slice."""
        snake_case_ : Union[str, Any] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : Tuple = self.get_dummy_components()
        snake_case_ : Any = StableDiffusionPanoramaPipeline(**lowercase__ )
        snake_case_ : List[str] = sd_pipe.to(lowercase__ )
        sd_pipe.set_progress_bar_config(disable=lowercase__ )
        snake_case_ : Union[str, Any] = self.get_dummy_inputs(lowercase__ )
        snake_case_ : Union[str, Any] = """french fries"""
        snake_case_ : List[Any] = sd_pipe(**lowercase__ , negative_prompt=lowercase__ )
        snake_case_ : Any = output.images
        snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case_ : List[str] = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def __UpperCamelCase (self ):
        """view_batch_size > 1 must not change the generated panorama."""
        snake_case_ : str = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : int = self.get_dummy_components()
        snake_case_ : Union[str, Any] = StableDiffusionPanoramaPipeline(**lowercase__ )
        snake_case_ : Any = sd_pipe.to(lowercase__ )
        sd_pipe.set_progress_bar_config(disable=lowercase__ )
        snake_case_ : Tuple = self.get_dummy_inputs(lowercase__ )
        snake_case_ : str = sd_pipe(**lowercase__ , view_batch_size=2 )
        snake_case_ : str = output.images
        snake_case_ : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case_ : int = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def __UpperCamelCase (self ):
        """Euler-ancestral scheduler should reproduce its own reference slice."""
        snake_case_ : Optional[int] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : Any = self.get_dummy_components()
        snake_case_ : Any = EulerAncestralDiscreteScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
        snake_case_ : Any = StableDiffusionPanoramaPipeline(**lowercase__ )
        snake_case_ : List[Any] = sd_pipe.to(lowercase__ )
        sd_pipe.set_progress_bar_config(disable=lowercase__ )
        snake_case_ : int = self.get_dummy_inputs(lowercase__ )
        snake_case_ : Dict = sd_pipe(**lowercase__ ).images
        snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case_ : int = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def __UpperCamelCase (self ):
        """PNDM scheduler (PRK steps skipped) should reproduce its own reference slice."""
        snake_case_ : str = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : Tuple = self.get_dummy_components()
        snake_case_ : int = PNDMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=lowercase__ )
        snake_case_ : Optional[int] = StableDiffusionPanoramaPipeline(**lowercase__ )
        snake_case_ : List[Any] = sd_pipe.to(lowercase__ )
        sd_pipe.set_progress_bar_config(disable=lowercase__ )
        snake_case_ : str = self.get_dummy_inputs(lowercase__ )
        snake_case_ : Dict = sd_pipe(**lowercase__ ).images
        snake_case_ : Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case_ : List[str] = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase):
"""simple docstring"""
    def __UpperCamelCase (self ):
        """Free Python and CUDA memory between slow tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCamelCase (self , lowercase__=0 ):
        """Standard panorama prompt/config with a fixed-seed torch generator."""
        snake_case_ : int = torch.manual_seed(lowercase__ )
        snake_case_ : str = {
            """prompt""": """a photo of the dolomites""",
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def __UpperCamelCase (self ):
        """Full SD-2 base + DDIM panorama run should match the reference 512x2048 image slice."""
        snake_case_ : Tuple = """stabilityai/stable-diffusion-2-base"""
        snake_case_ : str = DDIMScheduler.from_pretrained(lowercase__ , subfolder="""scheduler""" )
        snake_case_ : Any = StableDiffusionPanoramaPipeline.from_pretrained(lowercase__ , scheduler=lowercase__ , safety_checker=lowercase__ )
        pipe.to(lowercase__ )
        pipe.set_progress_bar_config(disable=lowercase__ )
        pipe.enable_attention_slicing()
        snake_case_ : Any = self.get_inputs()
        snake_case_ : Tuple = pipe(**lowercase__ ).images
        snake_case_ : List[Any] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 20_48, 3)
        snake_case_ : List[str] = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-2
    def __UpperCamelCase (self ):
        """LMS-scheduler panorama run compared against its reference slice."""
        snake_case_ : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-base""" , safety_checker=lowercase__ )
        snake_case_ : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(lowercase__ )
        pipe.set_progress_bar_config(disable=lowercase__ )
        pipe.enable_attention_slicing()
        snake_case_ : Tuple = self.get_inputs()
        snake_case_ : List[Any] = pipe(**lowercase__ ).images
        snake_case_ : Dict = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 20_48, 3)
        snake_case_ : str = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def __UpperCamelCase (self ):
        """The step callback should fire once per inference step with the expected intermediate latents."""
        snake_case_ : Union[str, Any] = 0
        def callback_fn(lowercase__ , lowercase__ , lowercase__ ) -> None:
            # Record that the callback ran and check latents at steps 1 and 2.
            snake_case_ : Optional[int] = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                snake_case_ : Optional[int] = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 2_56)
                snake_case_ : Dict = latents[0, -3:, -3:, -1]
                snake_case_ : Dict = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                snake_case_ : Optional[Any] = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 2_56)
                snake_case_ : Optional[int] = latents[0, -3:, -3:, -1]
                snake_case_ : int = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        snake_case_ : Tuple = False
        snake_case_ : Optional[Any] = """stabilityai/stable-diffusion-2-base"""
        snake_case_ : Dict = DDIMScheduler.from_pretrained(lowercase__ , subfolder="""scheduler""" )
        snake_case_ : Any = StableDiffusionPanoramaPipeline.from_pretrained(lowercase__ , scheduler=lowercase__ , safety_checker=lowercase__ )
        snake_case_ : Union[str, Any] = pipe.to(lowercase__ )
        pipe.set_progress_bar_config(disable=lowercase__ )
        pipe.enable_attention_slicing()
        snake_case_ : List[Any] = self.get_inputs()
        pipe(**lowercase__ , callback=lowercase__ , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3
def __UpperCamelCase (self ):
    """Integration test: with attention slicing + sequential CPU offload, peak
    CUDA memory for one panorama generation stays below the asserted bound.

    NOTE(review): `mem_bytes` in the final assert was bound to `snake_case_`
    (NameError as written); also the inline comment says 5.2 GB while the
    assert allows 5.5e9 bytes — confirm the intended bound.
    """
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    snake_case_ : str = """stabilityai/stable-diffusion-2-base"""
    snake_case_ : Tuple = DDIMScheduler.from_pretrained(lowercase__ , subfolder="""scheduler""" )
    snake_case_ : Tuple = StableDiffusionPanoramaPipeline.from_pretrained(lowercase__ , scheduler=lowercase__ , safety_checker=lowercase__ )
    snake_case_ : Optional[int] = pipe.to(lowercase__ )
    pipe.set_progress_bar_config(disable=lowercase__ )
    pipe.enable_attention_slicing(1 )
    pipe.enable_sequential_cpu_offload()
    snake_case_ : Any = self.get_inputs()
    snake_case_ : List[str] = pipe(**lowercase__ )
    snake_case_ : int = torch.cuda.max_memory_allocated()
    # make sure that less than 5.2 GB is allocated
    assert mem_bytes < 5.5 * 10**9
| 48
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( numa : int , numaa : int ) -> bool:
    """Return True iff the two integers have opposite signs.

    Relies on the sign bit of XOR: ``a ^ b`` is negative exactly when the
    operands' sign bits differ.

    >>> SCREAMING_SNAKE_CASE__(1, -1)
    True
    >>> SCREAMING_SNAKE_CASE__(1, 1)
    False
    >>> SCREAMING_SNAKE_CASE__(-7, -3)
    False
    """
    # Fix: the original declared two parameters with the same name (a
    # SyntaxError) and computed `numa ^ numa` (always 0, so always False).
    # Comparison binds looser than ^, so this is (numa ^ numaa) < 0.
    return numa ^ numaa < 0
if __name__ == "__main__":
    # Run this module's doctest examples when executed as a script.
    import doctest
    doctest.testmod()
| 48
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class __lowercase :
    """A node of a circular singly linked list: payload plus a `next` pointer.

    NOTE(review): the garbling renamed both this node class and the list class
    below to `__lowercase` (the second definition shadows this one) — the
    original distinct names need restoring at file level.
    """
    def __init__(self , lowercase__ ):
        # Fix: both attributes had been mangled to a bare `snake_case_` local,
        # so the node stored neither its data nor a next pointer.
        self.data = lowercase__
        self.next: Node | None = None  # lazily evaluated annotation (PEP 563 import above)
class __lowercase :
    """Circular singly linked list: `tail.next` always points back at `head`.

    NOTE(review): the garbled original bound every `self.<attr>` / local to a
    bare `snake_case_` and collapsed all public methods onto the duplicate
    name `__UpperCamelCase` (later defs overwrote earlier ones). This rewrite
    restores the canonical behaviour and method names. `Node` refers to the
    node class defined just above (itself renamed `__lowercase` by the
    garbling — restore that name at file level).
    """

    def __init__(self ):
        self.head = None  # first node, or None when the list is empty
        self.tail = None  # last node; tail.next is always head

    def __iter__(self ):
        """Yield each element once, walking the circle from head."""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self ):
        return sum(1 for _ in self )

    def __repr__(self ):
        return "->".join(str(item ) for item in iter(self ) )

    def insert_tail(self , data ):
        """Append `data` at the end of the list."""
        self.insert_nth(len(self ) , data )

    def insert_head(self , data ):
        """Prepend `data` at the front of the list."""
        self.insert_nth(0 , data )

    def insert_nth(self , index , data ):
        """Insert `data` before position `index` (0..len inclusive).

        Raises:
            IndexError: if `index` is outside 0..len(self).
        """
        if index < 0 or index > len(self ):
            raise IndexError("""list index out of range.""" )
        new_node = Node(data )
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.head = new_node
            self.tail = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = new_node
            self.tail.next = new_node  # keep the circle closed
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self ) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self ):
        """Remove and return the first element."""
        return self.delete_nth(0 )

    def delete_tail(self ):
        """Remove and return the last element."""
        return self.delete_nth(len(self ) - 1 )

    def delete_nth(self , index = 0 ):
        """Remove and return the element at `index`.

        Raises:
            IndexError: if `index` is outside 0..len(self)-1 (incl. empty list).
        """
        if not 0 <= index < len(self ):
            raise IndexError("""list index out of range.""" )
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = None
            self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.head.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self ) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self ):
        """Return True when the list holds no elements."""
        return len(self ) == 0
def SCREAMING_SNAKE_CASE__ ( ):
    """Smoke-test for the circular linked list: empty-list error handling, then
    inserts at head / tail / index, deletes, and repr checks after each step.

    NOTE(review): garbling artifacts — `CircularLinkedList` is unresolved (the
    list class above was renamed `__lowercase`), the list instance is bound to
    `snake_case_` but read back as `circular_linked_list`, and the asserts
    apply `len`/`str` to this test function itself (`SCREAMING_SNAKE_CASE__`)
    instead of the list / loop variable. Restore the original names to run.
    """
    snake_case_ : Optional[Any] = CircularLinkedList()
    assert len(SCREAMING_SNAKE_CASE__ ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(SCREAMING_SNAKE_CASE__ ) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(SCREAMING_SNAKE_CASE__ ) == i
        circular_linked_list.insert_nth(SCREAMING_SNAKE_CASE__ , i + 1 )
    assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(1 , 6 ) )
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 48
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
a_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
a_ = '''>>zh<<'''
a_ = '''Helsinki-NLP/'''
if is_torch_available():
a_ = '''pt'''
elif is_tf_available():
a_ = '''tf'''
else:
a_ = '''jax'''
@require_sentencepiece
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
    """Test suite for `MarianTokenizer`.

    NOTE(review): garbled — the mixin base is `_UpperCAmelCase` (undefined
    here; presumably `TokenizerTesterMixin`), all three class attributes share
    the name `_A` (later ones overwrite earlier), every test method is named
    `__UpperCamelCase` (duplicates overwrite), and locals are bound to
    `snake_case_` but read back under their original names (e.g. `save_dir`,
    `tokenizer`, `batch`, `en_de_tokenizer`) — NameError at runtime as written.
    """
    _A : str = MarianTokenizer
    _A : List[str] = False
    _A : List[str] = True
    def __UpperCamelCase (self ):
        """setUp: build a tiny sentencepiece-backed tokenizer fixture in tmpdirname.

        NOTE(review): `lowercase__` is undefined in this scope — presumably the
        vocab list / fixture paths that were garbled away.
        """
        super().setUp()
        snake_case_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        snake_case_ : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
        snake_case_ : Any = Path(self.tmpdirname )
        save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        snake_case_ : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def __UpperCamelCase (self , **lowercase__ ):
        # Factory: reload the fixture tokenizer with extra kwargs.
        return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
    def __UpperCamelCase (self , lowercase__ ):
        # Input/output text pair used by the common tokenizer tests.
        return (
            "This is a test",
            "This is a test",
        )
    def __UpperCamelCase (self ):
        # Token <-> id round trip for the EOS token (id 0).
        snake_case_ : Union[str, Any] = """</s>"""
        snake_case_ : Tuple = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
    def __UpperCamelCase (self ):
        # Vocab ordering and size of the 9-token fixture.
        snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(lowercase__ ) , 9 )
    def __UpperCamelCase (self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )
    def __UpperCamelCase (self ):
        # Round-trip the real en-de checkpoint: encode, save, reload.
        snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
        snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0]
        self.assertListEqual(lowercase__ , batch.input_ids[0] )
        snake_case_ : Tuple = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(lowercase__ )
        snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )]
        self.assertIn("""source.spm""" , lowercase__ )
        MarianTokenizer.from_pretrained(lowercase__ )
    def __UpperCamelCase (self ):
        # Truncation caps very long inputs at the 512-token model max length.
        snake_case_ : Union[str, Any] = self.get_tokenizer()
        snake_case_ : List[str] = tok(
            ["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(batch.input_ids.shape , (2, 5_12) )
    def __UpperCamelCase (self ):
        # Short inputs are padded only to the longest sequence in the batch.
        snake_case_ : Tuple = self.get_tokenizer()
        snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
    @slow
    def __UpperCamelCase (self ):
        # Integration test against a pinned revision of Helsinki-NLP/opus-mt-en-de.
        # fmt: off
        snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 
        5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
    def __UpperCamelCase (self ):
        # Separate source/target vocabs: text_target must use the target tokenizer.
        snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        snake_case_ : Dict = """Tämä on testi"""
        snake_case_ : List[Any] = """This is a test"""
        snake_case_ : Optional[int] = [76, 7, 20_47, 2]
        snake_case_ : List[str] = [69, 12, 11, 9_40, 2]
        snake_case_ : Any = tokenizer(lowercase__ ).input_ids
        self.assertListEqual(lowercase__ , lowercase__ )
        snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids
        self.assertListEqual(lowercase__ , lowercase__ )
        snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
        self.assertEqual(lowercase__ , lowercase__ )
| 48
| 1
|
"""simple docstring"""
import torch
from transformers import AutoModel
class __lowercase ( torch.nn.Module):
    """Few-shot NER span scorer: a BERT encoder plus softmax over similarities
    between query tokens and support-set start/end tokens.

    NOTE(review): heavily garbled — the attribute assignments in __init__ were
    mangled to bare `snake_case_` locals (so `self.bert`, `self.cos`,
    `self.softmax` are never set), the forward method below declares two
    parameters with the same name (SyntaxError), and it reads `self.BERT`,
    `q`, `S`, `W_supports`, `support_sizes`, `p_starts`/`p_ends` that are
    never bound under those names. Original identifiers must be restored.
    """
    def __init__(self , lowercase__="sayef/fsner-bert-base-uncased" ):
        # NOTE(review): `super(lowercase__, self)` passes the checkpoint string
        # where the class object was presumably intended.
        super(lowercase__ , self ).__init__()
        snake_case_ : Optional[Any] = AutoModel.from_pretrained(lowercase__ , return_dict=lowercase__ )
        snake_case_ : int = torch.nn.CosineSimilarity(3 , 1e-08 )
        snake_case_ : Optional[int] = torch.nn.Softmax(dim=1 )
    def __UpperCamelCase (self , **lowercase__ ):
        # Encode tokens with BERT; returns the last hidden state.
        return self.bert(**lowercase__ ).last_hidden_state
    def __UpperCamelCase (self , lowercase__ ):
        # Sum token embeddings along dim 2 (NOTE(review): `token_embeddings`
        # is undefined here — the parameter name was garbled to `lowercase__`).
        return token_embeddings.sum(2 , keepdim=lowercase__ )
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__=1 ):
        # Temperature-scaled softmax over cosine similarity.
        return self.softmax(T * self.cos(lowercase__ , lowercase__ ) )
    def __UpperCamelCase (self , lowercase__ , lowercase__ ):
        """Forward pass: score start/end token probabilities for each query
        against its slice of the support set."""
        snake_case_ : Tuple = W_supports["""sizes"""].tolist()
        snake_case_ : Optional[Any] = W_supports["""start_token_id"""].item()
        snake_case_ : Dict = W_supports["""end_token_id"""].item()
        # Remove bookkeeping keys before feeding the dict to BERT.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        snake_case_ : int = self.BERT(**lowercase__ )
        snake_case_ : Union[str, Any] = self.BERT(**lowercase__ )
        snake_case_ : Union[str, Any] = None
        snake_case_ : List[Any] = None
        snake_case_ : Optional[int] = W_supports["""input_ids"""] == start_token_id
        snake_case_ : List[Any] = W_supports["""input_ids"""] == end_token_id
        for i, size in enumerate(lowercase__ ):
            # Each query i owns the support rows [s, s + size).
            if i == 0:
                snake_case_ : Union[str, Any] = 0
            else:
                snake_case_ : List[Any] = support_sizes[i - 1]
            snake_case_ : Dict = S[s : s + size][start_token_masks[s : s + size]]
            snake_case_ : Tuple = S[s : s + size][end_token_masks[s : s + size]]
            snake_case_ : List[str] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            snake_case_ : str = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            # Accumulate per-query probability rows.
            if p_starts is not None:
                snake_case_ : Union[str, Any] = torch.vstack((p_starts, p_start) )
                snake_case_ : str = torch.vstack((p_ends, p_end) )
            else:
                snake_case_ : str = p_start
                snake_case_ : List[str] = p_end
        return p_starts, p_ends
| 48
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
    """Task template for automatic speech recognition: maps an audio input
    column to a transcription label column.

    NOTE(review): `_UpperCAmelCase` is a garbling artifact left in place
    because it is defined outside this block (presumably the `TaskTemplate`
    base and `True` for `frozen`).
    """

    # Fix: the garbled file declared all five attributes under the single name
    # `_A`, so later declarations overwrote earlier ones and the method below
    # crashed on `self.audio_column` / `self.input_schema`.
    task: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
    input_schema: ClassVar[Features] = Features({"""audio""": Audio()})
    label_schema: ClassVar[Features] = Features({"""transcription""": Value("""string""")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self , features ):
        """Return a copy of this template whose input schema carries the
        dataset's actual Audio feature for the audio column.

        Raises:
            ValueError: if the audio column is absent or not an Audio feature.
        """
        if self.audio_column not in features:
            raise ValueError(f'Column {self.audio_column} is not present in features.' )
        # Fix: the original tested isinstance against the garbled parameter
        # name instead of the imported `Audio` feature type.
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # Frozen dataclass: write through __dict__ rather than setattr.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self ):
        """Dataset-column -> template-column mapping."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 48
| 1
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
a_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 48
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
    """Image processor (resize / crop-pct / center-crop / rescale / normalize)
    in the style of transformers' PoolFormer-family processors.

    NOTE(review): garbled — the base is `_UpperCAmelCase` (presumably
    `BaseImageProcessor`), every method signature below declares multiple
    parameters under the duplicate name `lowercase__` (SyntaxError), and each
    `self.<attr>` assignment was mangled to a bare `snake_case_` local, so the
    names the bodies read (`size`, `crop_pct`, `do_resize`, `images`, ...)
    are never bound. Original parameter/attribute names must be restored.
    """
    _A : int = ["""pixel_values"""]
    def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
        super().__init__(**lowercase__ )
        # Defaults: 224 shortest edge for resize, 224x224 center crop.
        snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24}
        snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
        snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" )
        snake_case_ : Union[str, Any] = do_resize
        snake_case_ : List[str] = size
        snake_case_ : str = crop_pct
        snake_case_ : str = resample
        snake_case_ : Optional[Any] = do_center_crop
        snake_case_ : Dict = crop_size
        snake_case_ : int = do_rescale
        snake_case_ : Optional[int] = rescale_factor
        snake_case_ : str = do_normalize
        snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
        """Resize, optionally enlarging first by 1/crop_pct so a later center
        crop lands on the requested size."""
        snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
        if crop_pct is not None:
            if "shortest_edge" in size:
                snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct )
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    snake_case_ : Dict = int(size["""height"""] / crop_pct )
                else:
                    snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
            else:
                raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
            snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ )
        else:
            if "shortest_edge" in size:
                snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ )
            elif "height" in size and "width" in size:
                snake_case_ : int = (size["""height"""], size["""width"""])
            else:
                raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
        return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        """Center-crop to size['height'] x size['width']."""
        snake_case_ : int = get_size_dict(lowercase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
        return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ )
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        # Multiply pixel values by a scale factor (typically 1/255).
        return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        # Channel-wise (x - mean) / std normalization.
        return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
    def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
        """Full preprocessing pipeline; per-call arguments override the
        attributes configured in __init__."""
        snake_case_ : str = do_resize if do_resize is not None else self.do_resize
        snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct
        snake_case_ : List[Any] = resample if resample is not None else self.resample
        snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize
        snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean
        snake_case_ : int = image_std if image_std is not None else self.image_std
        snake_case_ : List[Any] = size if size is not None else self.size
        snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size
        snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" )
        snake_case_ : List[str] = make_list_of_images(lowercase__ )
        if not valid_images(lowercase__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # NOTE(review): precedence — this reads as
        # `(do_resize and size is None) or resample is None`; the intended
        # check is likely `do_resize and (size is None or resample is None)`.
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_center_crop and crop_pct is None:
            raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images]
        if do_resize:
            snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images]
        if do_center_crop:
            snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
        if do_rescale:
            snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
        if do_normalize:
            snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
        snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
        snake_case_ : Dict = {"""pixel_values""": images}
        return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
| 48
| 1
|
"""simple docstring"""
from itertools import permutations
def SCREAMING_SNAKE_CASE__ ( num : tuple ) -> bool:
    """Project Euler 43 helper: check the substring-divisibility property of a
    0-9 pandigital digit tuple.

    d2d3d4 must be divisible by 2, d3d4d5 by 3, d4d5d6 by 5, and
    d5d6d7 .. d8d9d10 by 7, 11, 13, 17 respectively.

    >>> SCREAMING_SNAKE_CASE__((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))
    True
    >>> SCREAMING_SNAKE_CASE__((0, 1, 2, 3, 4, 5, 6, 7, 8, 9))
    False
    """
    # Fix: the parameter had been garbled to the function's own name while the
    # body read `num` (NameError), and the loop enumerated the digit tuple
    # instead of the divisor list.
    if num[3] % 2 != 0:  # d4 even  <=>  d2d3d4 divisible by 2
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:  # digit-sum rule for 3
        return False
    if num[5] % 5 != 0:  # d6 in {0, 5}  <=>  d4d5d6 divisible by 5
        return False
    tests = [7, 1_1, 1_3, 1_7]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0:
            return False
    return True
def SCREAMING_SNAKE_CASE__ ( n : int = 1_0 ) -> int:
    """Project Euler 43: sum of all 0..n-1 pandigital numbers with the
    substring-divisibility property (n=10 gives the published answer).

    NOTE(review): the divisibility helper above was also garbled to the name
    `SCREAMING_SNAKE_CASE__`, so `is_substring_divisible` is unresolved until
    the original function names are restored at file level.
    """
    # Fix: the garbled version shadowed the parameter with the function's own
    # name and mapped the digit tuple over itself; map each digit through
    # `str` to build the candidate number instead.
    return sum(
        int("""""".join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
    # Print the Project Euler 43 answer when run as a script.
    # NOTE(review): `solution` is unresolved here — the garbling renamed both
    # module functions to `SCREAMING_SNAKE_CASE__`; restore the original names.
    print(F'''{solution() = }''')
| 48
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
a_ = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowercase ( _UpperCAmelCase):
    """Fast MBart tokenizer (backed by `tokenizers`) with source/target
    language-code handling: encodes as `tokens </s> <lang_code>`.

    NOTE(review): garbled — the base is `_UpperCAmelCase` (presumably
    `PreTrainedTokenizerFast`), the module constants referenced below
    (VOCAB_FILES_NAMES, PRETRAINED_*, FAIRSEQ_LANGUAGE_CODES) were renamed to
    `a_` at module level, all class attributes are declared under the
    duplicate name `_A`, most methods share the duplicate name
    `__UpperCamelCase`, `@src_lang.setter` references a property name that no
    longer exists (NameError at class creation), and locals are bound to
    `snake_case_` but read back under their original names
    (`_additional_special_tokens`, `tgt_lang_id`, `prefix_tokens_str`, ...).
    """
    _A : Dict = VOCAB_FILES_NAMES
    _A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    _A : str = ["""input_ids""", """attention_mask"""]
    _A : Tuple = MBartTokenizer
    _A : List[int] = []
    _A : List[int] = []
    def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ):
        # Mask token behave like a normal word, i.e. include the space before it
        snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
        super().__init__(
            vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , )
        snake_case_ : Dict = vocab_file
        snake_case_ : Optional[int] = False if not self.vocab_file else True
        # Register all FAIRSEQ language codes as additional special tokens.
        snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
        snake_case_ : Any = {
            lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX"""
        snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang )
        snake_case_ : Tuple = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def __UpperCamelCase (self ):
        # Current source language code.
        return self._src_lang
    @src_lang.setter
    def __UpperCamelCase (self , lowercase__ ):
        # Setting src_lang re-applies the source special tokens.
        snake_case_ : Tuple = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
        # build_inputs_with_special_tokens: wrap ids with prefix/suffix tokens.
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
        # create_token_type_ids_from_sequences: mBART uses all-zero segment ids.
        snake_case_ : List[Any] = [self.sep_token_id]
        snake_case_ : Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ):
        # _build_translation_inputs: encode raw text and attach the forced
        # target-language BOS id.
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        snake_case_ : int = src_lang
        snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
        snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ )
        snake_case_ : Union[str, Any] = tgt_lang_id
        return inputs
    def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ):
        # prepare_seq2seq_batch: record languages, defer to the base class.
        snake_case_ : List[str] = src_lang
        snake_case_ : int = tgt_lang
        return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
    def __UpperCamelCase (self ):
        # Context-manager hook: switch into source-language mode.
        return self.set_src_lang_special_tokens(self.src_lang )
    def __UpperCamelCase (self ):
        # Context-manager hook: switch into target-language mode.
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def __UpperCamelCase (self , lowercase__ ):
        """set_src_lang_special_tokens: no prefix, suffix = [eos, src_lang_code];
        rebuilds the backend post-processor accordingly."""
        snake_case_ : int = self.convert_tokens_to_ids(lowercase__ )
        snake_case_ : Tuple = []
        snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
        snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
        snake_case_ : Optional[int] = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def __UpperCamelCase (self , lowercase__ ):
        """set_tgt_lang_special_tokens: mirror of the source variant for the
        target language code."""
        snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ )
        snake_case_ : Optional[int] = []
        snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
        snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
        snake_case_ : List[Any] = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
        """save_vocabulary: copy the sentencepiece model next to the fast
        tokenizer files; requires the slow-tokenizer assets to be present."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(lowercase__ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
            return
        snake_case_ : List[str] = os.path.join(
            lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
            copyfile(self.vocab_file , lowercase__ )
        return (out_vocab_file,)
| 48
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """Builds a tiny Funnel config plus random inputs and checks the output
    shapes of every TF Funnel head.

    Restored: the corrupted version had a duplicated-placeholder __init__
    signature (a SyntaxError) and lost all local/attribute names; names are
    reconstructed from the attribute reads below and from the call sites in the
    test classes (``create_and_check_model`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        """Create a tiny FunnelConfig and random input tensors/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Exercise dict/list/tensor call styles and the truncate_seq/separate_cls variants."""
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Base (encoder-only) model: output length is 2, or 3 without sequence truncation."""
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        # Tile each input to shape (batch, num_choices, seq_len) as the MC head expects.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() output into (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for the full (encoder-decoder) TF Funnel models.

    Restored: the corrupted version used placeholder class/attribute names; the
    mixin bases and attribute names follow the imports at the top of the file
    and the common-test framework's expected attributes.
    """

    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common-API tests for the base (encoder-only) TF Funnel variants.

    Uses ``TFFunnelModelTester(base=True)`` so hidden-layer counts match the
    encoder-only architecture.
    """

    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 48
|
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash:
    """Pure-Python SHA-1 (FIPS 180-1 style) over a bytes message.

    Restored: the corrupted version defined every method under the same
    placeholder name and assigned intermediate results to throwaway locals, so
    ``final_hash`` never updated ``self.h``; names are reconstructed from the
    internal calls (``self.padding()``, ``self.split_blocks()``, ...) and the
    module's call sites (``SHAaHash(...).final_hash()``).
    """

    def __init__(self, data):
        # Raw message bytes and the five 32-bit initial hash words.
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer `n` by `b` bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the message to a multiple of 64 bytes, appending the 64-bit bit-length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into the eighty 32-bit words of the SHA-1 schedule."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the 80-round compression over every block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                # Round function f and constant k depend on the round quarter.
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            # Fold this block's result into the running hash state.
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    """Cross-check our SHA-1 against hashlib (fixed: the corrupted version
    called the nonexistent ``hashlib.shaa`` and a lost local name)."""
    msg = b"Test String"
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main():
    """CLI entry point: hash a --string argument or the contents of a --file.

    Restored name: the ``if __name__ == "__main__"`` guard below calls ``main()``.
    """
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaHash(hash_input).final_hash())
if __name__ == "__main__":
    main()
    # Run doctests only when executed as a script; the original ran
    # doctest.testmod() at import time (a side effect on every import).
    import doctest

    doctest.testmod()
| 48
| 1
|
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    """Shared assertions for a Dataset loaded from the 4-line text fixture.

    Restored name: every test below calls ``_check_text_dataset`` by name.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """keep_in_memory=True must load into Arrow memory; False must not grow it."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    """Explicit `features` should override the default string dtype of the text column."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    """The reader should honor an explicit split name, defaulting to "train"."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    """A single path and a one-element list of paths should load identically."""
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict: each requested split is a 4-row text dataset."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """DatasetDict variant of the keep_in_memory memory-behavior check."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    """DatasetDict variant of the explicit-features dtype override check."""
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    """With an explicit split, load only it; otherwise load both train and test."""
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 48
|
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
    """Manim scene: draws CPU/GPU/Model memory boxes and a checkpoint, then
    animates checkpoint shards filling the model slots and moving into CPU
    memory.

    NOTE(review): this block is corrupted by placeholder renaming — assignment
    targets became throwaway ``snake_case_`` locals while later lines still read
    the original names (``mem``, ``cpu``, ``cpu_left_col_base``, ``cpu_targs``,
    ``fill`` ...), and several arguments became ``lowercase__``. The code is kept
    byte-identical here; restore the original names before running.
    """
    def __UpperCamelCase (self ):
        # Base "memory cell" rectangles; the 0.46-sized one is the inner fill glyph.
        snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
        snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU block: two columns of six cells plus a label.
        snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
        snake_case_ : str = [mem.copy() for i in range(6 )]
        snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
        snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(lowercase__ )
        # GPU block: four cells plus a label.
        snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
        snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
        snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        gpu.move_to([-1, -1, 0] )
        self.add(lowercase__ )
        # Model block: six cells plus a label.
        snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
        snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : Dict = Text("""Model""" , font_size=24 )
        snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        model.move_to([3, -1.0, 0] )
        self.add(lowercase__ )
        # Place one small target rectangle per model cell, snaking across the CPU cells.
        snake_case_ : Dict = []
        for i, rect in enumerate(lowercase__ ):
            rect.set_stroke(lowercase__ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
            if i == 0:
                # First target anchors to the bottom-left corner of the CPU's first cell.
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                # Fourth target starts a second row above the first one.
                cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
            self.add(lowercase__ )
            cpu_targs.append(lowercase__ )
        # Checkpoint block: six cells plus a label, above the model.
        snake_case_ : List[str] = [mem.copy() for i in range(6 )]
        snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
        snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        # Legend (key) in the top-left corner.
        snake_case_ : Optional[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        snake_case_ : Union[str, Any] = MarkupText(
            f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(lowercase__ , lowercase__ )
        snake_case_ : List[Any] = MarkupText(
            f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
        blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        # Step description shown at the top of the scene.
        snake_case_ : List[Any] = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(lowercase__ ) , Write(lowercase__ ) )
        self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
        # Animate: grow a blue fill in each checkpoint cell, then move a copy to its CPU slot.
        snake_case_ : Optional[int] = []
        snake_case_ : List[str] = []
        for i, rect in enumerate(lowercase__ ):
            snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
            target.move_to(lowercase__ )
            first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
            snake_case_ : List[Any] = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
        self.play(*lowercase__ )
        self.play(*lowercase__ )
        self.wait()
| 48
| 1
|
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    """Iterable dataset yielding fixed-length token chunks from a text stream.

    It buffers roughly ``seq_length * chars_per_token * num_of_sequences``
    characters of text, tokenizes the buffer, joins all token ids with the
    BOS separator, and yields every full ``seq_length`` slice as a tensor.

    Restored: the corrupted version had a duplicated-placeholder signature (a
    SyntaxError), never stored its constructor arguments, and referenced a bare
    ``tokenizer``; the class name matches the call site in ``create_dataloader``.
    """

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        # BOS id is used as the separator between concatenated documents.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Target number of raw characters to buffer before each tokenization pass.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                # Drop the trailing partial chunk so every sample has the same length.
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    """Stream the evaluation split and wrap it in a constant-length DataLoader.

    Relies on the module-level ``tokenizer`` created in the script body below.
    Restored name: the script body calls ``create_dataloader(args)``.
    """
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    """Compute mean eval loss and perplexity over the prepared dataloader.

    Uses the module-level ``model``, ``eval_dataloader`` and ``accelerator``.
    Restored names: the script body calls ``evaluate(args)`` and the corrupted
    version discarded the loss/perplexity intermediates.
    """
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            # Causal LM: the input ids double as the labels.
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer (the tokenizer is read by create_dataloader above).
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 48
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Sort ``a[start:end + 1]`` in place with randomized quicksort.

    Returns the total number of element comparisons performed by the
    partitioning steps (used by the driver code for instrumentation).

    Fixes: the def name never matched its recursive calls and the driver call
    site (``_in_place_quick_sort``), and the pre-partition swap assigned both
    elements to throwaway locals instead of mutating the list.
    """
    count = 0
    if start < end:
        # Move a random element to the end of the slice before partitioning.
        # _in_place_partition re-randomizes as well; the extra swap is kept
        # for parity with the original structure.
        pivot = randint(start, end)
        a[end], a[pivot] = a[pivot], a[end]
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Tuple = 0
snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = a[end]
snake_case_ : List[Any] = a[pivot]
snake_case_ : Optional[Any] = temp
snake_case_ : List[str] = start - 1
for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case_ : Any = new_pivot_index + 1
snake_case_ : Tuple = a[new_pivot_index]
snake_case_ : Optional[int] = a[index]
snake_case_ : Tuple = temp
snake_case_ : Union[str, Any] = a[new_pivot_index + 1]
snake_case_ : Union[str, Any] = a[end]
snake_case_ : Union[str, Any] = temp
return new_pivot_index + 1, count
# Demo driver: sort 100 samples drawn from a standard normal distribution
# and report the comparison count.
# NOTE(review): the repeated rebinding of ``a_`` never defines the names the
# later statements read (outfile, mu, sigma, p, X, M, r, z) — restore the
# original variable names before running this script.
a_ = TemporaryFile()
a_ = 100 # 100 elements are to be sorted
a_ , a_ = 0, 1 # mean and standard deviation
a_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a_ = np.load(outfile)
a_ = len(M) - 1
a_ = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution'''
    '''is :'''
)
print(z)
| 48
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
    """Return the two's-complement binary representation (``"0b"``-prefixed
    string) of a non-positive integer.

    The width is the minimal bit length of ``|number|`` plus one sign bit;
    zero maps to ``"0b0"``. Raises ValueError for positive input.
    """
    if SCREAMING_SNAKE_CASE__ > 0:
        raise ValueError("""input must be a negative integer""")
    # Minimal number of magnitude bits (bin(n) of a negative is "-0b...").
    width = len(bin(SCREAMING_SNAKE_CASE__)[3:])
    # Bits of |n| - 2**width, i.e. the complement pattern without sign bit.
    complement_bits = bin(abs(SCREAMING_SNAKE_CASE__) - (1 << width))[3:]
    if SCREAMING_SNAKE_CASE__ < 0:
        zero_fill = "0" * (width - len(complement_bits))
        body = "1" + zero_fill + complement_bits
    else:
        body = "0"
    return "0b" + body
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    import doctest
    doctest.testmod()
| 48
|
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : bool = False ):
"""simple docstring"""
snake_case_ : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE__ )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(SCREAMING_SNAKE_CASE__ )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ):
if random.random() < probability:
graph[i].append(SCREAMING_SNAKE_CASE__ )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(SCREAMING_SNAKE_CASE__ )
return graph
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
    """Build the complete graph on the given number of vertices.

    Returns an adjacency dict mapping each vertex to the list of every other
    vertex.
    """
    adjacency = {}
    for vertex in range(SCREAMING_SNAKE_CASE__):
        adjacency[vertex] = [other for other in range(SCREAMING_SNAKE_CASE__) if other != vertex]
    return adjacency
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    import doctest
    doctest.testmod()
| 48
| 1
|
"""simple docstring"""
from __future__ import annotations
a_ = '''Muhammad Umer Farooq'''
a_ = '''MIT'''
a_ = '''1.0.0'''
a_ = '''Muhammad Umer Farooq'''
a_ = '''contact@muhammadumerfarooq.me'''
a_ = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __lowercase ( HTMLParser):
    """HTML parser that collects, per page, the absolute URL of every
    non-empty anchor ``href``.

    Fixes: the base class was an undefined placeholder (must be the imported
    ``HTMLParser`` for ``feed()`` to dispatch), the callback must be named
    ``handle_starttag`` to be invoked by the parser, and ``__init__`` bound
    throwaway locals instead of ``self.urls``/``self.domain``.
    """

    def __init__(self , domain ):
        super().__init__()
        # Collected absolute URLs, in document order, without duplicates.
        self.urls = []
        # Base URL used to resolve relative hrefs.
        self.domain = domain

    def handle_starttag(self , tag , attrs ):
        """HTMLParser callback: record the href of every anchor tag."""
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and neither empty nor a bare fragment.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name(url: str) -> str:
    """Return the registrable domain: the last two labels of the netloc.

    e.g. ``https://a.b.github.com/x`` -> ``github.com``.

    Fixes: the def name never matched its call site, and it called an
    undefined helper (the netloc extraction is inlined here).
    """
    return ".".join(parse.urlparse(url).netloc.split(".")[-2:])
def get_sub_domain_name(url: str) -> str:
    """Return the full network location (sub-domain + domain) of ``url``.

    Fix: the def name never matched the ``get_sub_domain_name`` call site.
    """
    return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list:
    """Scrape ``url`` and return a sorted, de-duplicated list of e-mail
    addresses (``something@<domain>``) found on the pages it links to.

    Fixes: the def name never matched the ``emails_from_url`` call site; the
    body bound every value to throwaway locals, read the unbound names
    ``r``/``read``/``emails``, and returned ``sorted(url)`` — the sorted
    characters of the URL string.

    NOTE(review): catching ``ValueError`` around ``requests.get`` is
    preserved from the original, but requests raises its own exception types
    — confirm the intended failure handling.
    """
    # Registrable domain (last two netloc labels), used for the e-mail regex.
    domain = ".".join(parse.urlparse(url).netloc.split(".")[-2:])
    # Initialize the parser (the anchor-collecting HTMLParser defined above).
    parser = __lowercase(domain)
    try:
        # Open URL
        r = requests.get(url)
        # pass the raw HTML to the parser to get links
        parser.feed(r.text)
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    # NOTE(review): ``a_`` is rebound and ``emails`` below is never bound —
    # the result of emails_from_url() should be assigned to ``emails``.
    a_ = emails_from_url('''https://github.com''')
    print(F'''{len(emails)} emails found:''')
    print('''\n'''.join(sorted(emails)))
| 48
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class __lowercase ( PretrainedConfig):
    """Configuration for DPR (Dense Passage Retrieval) encoders/readers: a
    BERT-style encoder plus the DPR-specific ``projection_dim``.

    Fixes: the base class was an undefined placeholder (``PretrainedConfig``
    is imported at the top of this file), and the original ``__init__``
    repeated one placeholder name for every parameter — a SyntaxError —
    while the body read the real (unbound) names restored below.
    """

    # NOTE(review): presumably this should be ``model_type`` for
    # PretrainedConfig registration — mangled attribute name preserved.
    _A : str = """dpr"""

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Dimension of the output projection; 0 means "no extra projection".
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 48
| 1
|
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 48
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
a_ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
a_ = 10
a_ = 256
def get_min_hash(tokens):
    """Return a MinHash over the unique tokens, or None when the token list
    is shorter than MIN_NUM_TOKENS (too little signal to hash).

    Fixes: the def name never matched its ``get_min_hash`` call site, the
    token list itself was passed as ``num_perm`` (should be the module
    constant NUM_PERM), and ``min_hash`` was bound to a throwaway local.
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code):
    """Return the set of non-blank tokens of ``code``, split on
    non-alphanumeric characters (module-level ``NON_ALPHA`` pattern).

    Fix: the def name never matched the ``get_tokens`` call sites below.
    """
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class __lowercase :
    """Incremental MinHash-LSH index used to group near-duplicate code files
    into clusters keyed by the first-seen ("base") element.

    NOTE(review): the mangled bindings below never set the attributes the
    methods read (``self._index``, ``self._duplicate_clusters``, …), the
    second method repeats a parameter name (a SyntaxError), and all three
    later methods share the name ``__UpperCamelCase`` so only the last
    survives — restore the original ``DuplicationIndex`` before use.
    """
    def __init__(self , *,
    lowercase__ = 0.85 , ):
        # NOTE(review): these bind throwaway locals, not instance attributes,
        # and read the unbound name ``duplication_jaccard_threshold``.
        snake_case_ : Tuple = duplication_jaccard_threshold
        snake_case_ : Optional[Any] = NUM_PERM
        snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        snake_case_ : List[Any] = defaultdict(lowercase__ )
    def __UpperCamelCase (self , lowercase__ , lowercase__ ):
        # add(code_key, min_hash): query near-duplicates, insert, and attach
        # the key to an existing cluster or start a new one.
        snake_case_ : int = self._index.query(lowercase__ )
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}' )
            return
        self._index.insert(lowercase__ , lowercase__ )
        if len(lowercase__ ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(lowercase__ )
                    break
            else:
                # No known base among the near-duplicates: anchor on the first.
                self._duplicate_clusters[close_duplicates[0]].add(lowercase__ )
    def __UpperCamelCase (self ):
        # get_duplicate_clusters(): reformat each cluster (base + members)
        # into a list of {base_index, repo_name, path} dicts.
        snake_case_ : str = []
        for base, duplicates in self._duplicate_clusters.items():
            snake_case_ : Optional[Any] = [base] + list(lowercase__ )
            # reformat the cluster to be a list of dict
            snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(lowercase__ )
        return duplicate_clusters
    def __UpperCamelCase (self , lowercase__ ):
        # save(filepath): dump the clusters as JSON.
        snake_case_ : int = self.get_duplicate_clusters()
        with open(lowercase__ , """w""" ) as f:
            json.dump(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
    """Map one ``(index, row)`` element to
    ``((index, repo_name, path), MinHash)``; rows with too few tokens yield
    None implicitly.

    NOTE(review): mangled bindings — ``element``, ``data``, ``min_hash`` and
    ``index`` are read but never bound; restore ``index, data = element`` etc.
    """
    snake_case_ , snake_case_ : str = element
    snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ):
    """Yield ``(key, MinHash)`` pairs for a dataset iterator, computing the
    hashes in a process pool fed through a threaded prefetch queue.

    NOTE(review): ``_compute_min_hash`` refers to the (mangled) helper above
    by its original name — it is unbound in this file as written.
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
            if data is not None:
                yield data
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float ):
    """Build duplicate clusters for a dataset with a DuplicationIndex.

    NOTE(review): the signature repeats one placeholder parameter name (a
    SyntaxError), and ``DuplicationIndex``/``minhash_iter``/``di`` are read
    unbound — restore the original names before use.
    """
    snake_case_ : int = DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE__ )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE__ ) ) , max_queue_size=1_0_0 ) ):
        di.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code_a: str, code_b: str) -> float:
    """Jaccard similarity between the token sets of two code strings.

    Tokens are the non-blank pieces produced by splitting on non-alphanumeric
    characters (module-level ``NON_ALPHA`` pattern).

    Fixes: the def name never matched the ``jaccard_similarity`` call site,
    the signature repeated one placeholder parameter name (a SyntaxError),
    and both token sets were the same unbound variable.

    NOTE(review): two token-less strings raise ZeroDivisionError — behavior
    inherited from the original contract.
    """
    tokens_a = {t for t in NON_ALPHA.split(code_a) if len(t.strip()) > 0}
    tokens_b = {t for t in NON_ALPHA.split(code_b) if len(t.strip()) > 0}
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)
a_ = None
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
    """For one duplicate cluster, keep a minimal set of "extreme" elements
    and count (in ``copies``) how many near-duplicates each extreme covers.

    NOTE(review): the signature repeats one placeholder parameter name (a
    SyntaxError); ``cluster``, ``extremes``, ``jaccard_threshold`` and the
    per-element code strings are read unbound — restore the original names.
    """
    snake_case_ : Optional[Any] = []
    for elementa in cluster:
        snake_case_ : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""]
        for elementa in extremes:
            snake_case_ : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
            if jaccard_similarity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) >= jaccard_threshold:
                elementa["copies"] += 1
                break
        else:
            # Not close to any existing extreme: start a new one.
            snake_case_ : Union[str, Any] = 1
            extremes.append(SCREAMING_SNAKE_CASE__ )
    return extremes
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
    """Run the per-cluster extremes search over all clusters in a process
    pool, sharing the dataset through the module-global ``_shared_dataset``.

    NOTE(review): the signature repeats one placeholder parameter name (a
    SyntaxError); ``dataset``, ``extremes_list`` and the partial function are
    read unbound — restore the original names before use.
    """
    global _shared_dataset
    snake_case_ : str = dataset
    snake_case_ : int = []
    snake_case_ : Optional[int] = partial(_find_cluster_extremes_shared , jaccard_threshold=SCREAMING_SNAKE_CASE__ )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) , total=len(SCREAMING_SNAKE_CASE__ ) , ):
            extremes_list.append(SCREAMING_SNAKE_CASE__ )
    return extremes_list
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float = 0.85 ):
    """End-to-end near-deduplication: cluster near-duplicates, pick
    per-cluster "extremes", filter out the rest of the duplicates, and
    annotate each cluster element with ``is_extreme``/``copies``.

    NOTE(review): the signature (and the filter lambda) repeat one
    placeholder parameter name — SyntaxErrors — and the mangled locals
    (duplicate_clusters, duplicate_indices, extreme_dict, extremes_clusters,
    remove_indices, ds_filter) are read unbound; restore the original names.
    """
    snake_case_ : List[str] = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    snake_case_ : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    snake_case_ : str = {}
    snake_case_ : Dict = find_extremes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    for extremes in extremes_clusters:
        for element in extremes:
            snake_case_ : int = element
    snake_case_ : Optional[int] = duplicate_indices - set(extreme_dict.keys() )
    snake_case_ : List[Any] = dataset.filter(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : idx not in remove_indices , with_indices=SCREAMING_SNAKE_CASE__ )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            snake_case_ : List[Any] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                snake_case_ : str = extreme_dict[element["""base_index"""]]["""copies"""]
    print(f'Original dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
    print(f'Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE__ )}' )
    print(f'Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
    print(f'Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
    print(f'Filtered dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
    return ds_filter, duplicate_clusters
| 48
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , *lowercase__ , **lowercase__ ):
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , lowercase__ , )
super().__init__(*lowercase__ , **lowercase__ )
| 48
|
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
a_ = logging.getLogger(__name__)
if __name__ == "__main__":
    # CLI: count token occurrences in a binarized dataset, producing the
    # per-token counts used to smooth MLM masking probabilities.
    # NOTE(review): every assignment rebinds ``a_``; the names read below
    # (parser, args, data, counter, counts) are never bound — restore the
    # original variable names before running.
    a_ = argparse.ArgumentParser(
        description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
    )
    parser.add_argument(
        '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
    )
    parser.add_argument(
        '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
    )
    parser.add_argument('''--vocab_size''', default=30522, type=int)
    a_ = parser.parse_args()
    logger.info(F'''Loading data from {args.data_file}''')
    with open(args.data_file, '''rb''') as fp:
        a_ = pickle.load(fp)
    logger.info('''Counting occurrences for MLM.''')
    a_ = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    a_ = [0] * args.vocab_size
    for k, v in counter.items():
        a_ = v
    logger.info(F'''Dump to {args.token_counts_dump}''')
    with open(args.token_counts_dump, '''wb''') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 48
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class __lowercase ( PretrainedConfig):
    """Configuration for FNet models (Fourier-transform token mixing).

    Fixes: the base class was an undefined placeholder (``PretrainedConfig``
    is imported at the top of this file), and the original ``__init__``
    repeated one placeholder name for every parameter — a SyntaxError —
    while the body read the real (unbound) names restored below.
    """

    # NOTE(review): presumably this should be ``model_type`` for
    # PretrainedConfig registration — mangled attribute name preserved.
    _A : str = """fnet"""

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # TPU-specific Fourier-transform fast path and its fixed seq length.
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 48
|
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    """Shared assertions: the text fixture loads as a 4-row, single "text"
    column Dataset with the expected feature dtypes.

    Fixes: the signature repeated one placeholder parameter name (a
    SyntaxError) while the body read the unbound real names; the helper is
    referenced below as ``_check_text_dataset``.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """Text loading honors keep_in_memory (checked via the arrow-memory
    assertion context managers).

    Fixes: mangled/duplicate parameter names (a SyntaxError) and throwaway
    local bindings restored from the names the body reads; the ``test_``
    prefix is required for pytest collection.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    """An explicit ``features`` schema (or None for the default) is applied
    when reading the text fixture.

    Fixes: mangled/duplicate parameter names (a SyntaxError) and throwaway
    local bindings restored from the names the body reads.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    """The requested split name is propagated to the loaded Dataset
    (defaulting to "train" when None).

    Fixes: mangled/duplicate parameter names (a SyntaxError) and throwaway
    local bindings restored from the names the body reads.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    """Both a single path string and a list of paths are accepted.

    Fixes: mangled/duplicate parameter names (a SyntaxError) and throwaway
    local bindings restored from the names the body reads.
    """
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict: every requested split is a 4-row,
    single "text" column dataset with the expected feature dtypes.

    Fixes: the signature repeated one placeholder parameter name (a
    SyntaxError) while the body read the unbound real names; referenced
    below as ``_check_text_datasetdict``.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """DatasetDict text loading honors keep_in_memory (checked via the
    arrow-memory assertion context managers).

    Fixes: mangled/duplicate parameter names (a SyntaxError) and throwaway
    local bindings restored from the names the body reads.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    """An explicit ``features`` schema (or None for the default) is applied
    when reading the text fixture into a DatasetDict.

    Fixes: mangled/duplicate parameter names (a SyntaxError) and throwaway
    local bindings restored from the names the body reads.
    """
    cache_dir = tmp_path / "cache"
    # The default dtype for the single "text" column is "string".
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    """Split selection: a single named split, or both train/test when no
    split is requested.

    Fixes: mangled/duplicate parameter names (a SyntaxError) and throwaway
    local bindings restored from the names the body reads.
    """
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 48
| 1
|
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class __lowercase :
    """Pure-Python SHA-1 over a bytes message, matching hashlib.sha1.

    Fixes: every method was named ``__UpperCamelCase`` so only the last
    survived, while ``final_hash`` called ``self.padding`` /
    ``self.split_blocks`` / ``self.expand_block`` — names restored from
    those call sites. ``__init__`` also bound throwaway locals instead of
    ``self.data``/``self.h``.
    """

    def __init__(self , data ):
        # Raw message bytes and the five 32-bit initial state words (A..E).
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n , b ):
        """32-bit left-rotate of ``n`` by ``b`` bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self ):
        """Append the 0x80 terminator, zero fill, and the 64-bit bit length
        so the padded message is a multiple of 64 bytes."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data ) + 8) % 64)
        return self.data + padding + struct.pack(">Q" , 8 * len(self.data ) )

    def split_blocks(self ):
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
        ]

    def expand_block(self , block ):
        """Expand a 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack(">16L" , block ) ) + [0] * 64
        for i in range(16 , 80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
        return w

    def final_hash(self ):
        """Run the 80-round compression over every block and return the
        40-character hexadecimal digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a, b, c, d, e = self.h
            for i in range(0 , 80 ):
                # Round function f and constant k per 20-round stage.
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b , 30 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h )
def SCREAMING_SNAKE_CASE__ ( ):
    """Self-test: the hand-rolled SHA-1 must match hashlib's digest.

    NOTE(review): mangled names — ``SHAaHash`` does not exist (the class
    above is ``__lowercase``), ``hashlib.shaa`` should be ``hashlib.sha1``,
    and the sample message is bound to a throwaway local.
    """
    snake_case_ : Union[str, Any] = b"""Test String"""
    assert SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() == hashlib.shaa(SCREAMING_SNAKE_CASE__ ).hexdigest() # noqa: S324
def SCREAMING_SNAKE_CASE__ ( ):
    """CLI entry point: SHA-1 hash the --string argument, or the contents of
    --file when given, and print the hex digest.

    NOTE(review): mangled names — ``parser``/``args`` and the hash input are
    bound to throwaway locals, and ``SHAaHash`` does not exist (the class
    above is ``__lowercase``); restore the original names before running.
    """
    snake_case_ : int = argparse.ArgumentParser(description="""Process some strings or files""" )
    parser.add_argument(
        """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
    parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    snake_case_ : Optional[int] = parser.parse_args()
    snake_case_ : Optional[int] = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            snake_case_ : List[str] = f.read()
    else:
        snake_case_ : Dict = bytes(SCREAMING_SNAKE_CASE__ , """utf-8""" )
    print(SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() )
if __name__ == "__main__":
    # NOTE(review): ``main`` is never defined under that name here — the CLI
    # entry point above is mangled to ``SCREAMING_SNAKE_CASE__``.
    main()
    import doctest
    doctest.testmod()
| 48
|
"""simple docstring"""
from copy import deepcopy
class __lowercase :
    """Fenwick tree (Binary Indexed Tree) over integers.

    Index 0 is stored separately in ``tree[0]``; the rest supports point
    updates and prefix/range sums in O(log n), plus an O(log n) binary-lifting
    ``rank_query``.

    Fixes: the original ``__init__`` repeated one placeholder parameter name
    (a SyntaxError), and every method was named ``__UpperCamelCase`` so only
    the last survived while the bodies called ``self.init``/``self.next_``/
    ``self.prev``/``self.add``/``self.get``/``self.prefix``/``self.query`` —
    names restored from those call sites.
    """

    def __init__(self , arr = None , size = None ):
        """Build from an explicit array, or create a zeroed tree of ``size``."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError("""Either arr or size must be specified""" )

    def init(self , arr ):
        """O(n) construction of the tree from ``arr``."""
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 , self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self ):
        """Recover the original array from the internal tree (O(n))."""
        arr = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index ):
        # Next node on the update path: add the lowest set bit.
        return index + (index & (-index))

    @staticmethod
    def prev(index ):
        # Previous node on the query path: drop the lowest set bit.
        return index - (index & (-index))

    def add(self , index , value ):
        """Add ``value`` at position ``index`` (point update)."""
        if index == 0:
            # Index 0 lives outside the implicit tree structure.
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )

    def update(self , index , value ):
        """Set position ``index`` to ``value``."""
        self.add(index , value - self.get(index ) )

    def prefix(self , right ):
        """Sum of the half-open prefix ``[0, right)``."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result

    def query(self , left , right ):
        """Sum over the half-open range ``[left, right)``."""
        return self.prefix(right ) - self.prefix(left )

    def get(self , index ):
        """Value stored at ``index``."""
        return self.query(index , index + 1 )

    def rank_query(self , value ):
        """Binary-lifting search over prefix sums; returns -1 when ``value``
        is below ``tree[0]``, otherwise the largest reachable index whose
        running prefix does not exceed ``value``."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    import doctest
    doctest.testmod()
| 48
| 1
|
"""simple docstring"""
def exchange_sort(numbers):
    """Exchange sort: repeatedly swap out-of-order pairs. Sorts the list in
    place and returns it.

    Fixes: the def name never matched the ``exchange_sort(unsorted)`` call
    site, the body read the unbound name ``numbers``, and the swap assigned
    both elements to throwaway locals, leaving the list unsorted.
    """
    n = len(numbers )
    for i in range(n ):
        for j in range(i + 1 , n ):
            if numbers[j] < numbers[i]:
                # Swap the elements in place.
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    # NOTE(review): ``a_`` is rebound; ``user_input``/``unsorted`` below are
    # never bound — restore the original variable names before running.
    a_ = input('''Enter numbers separated by a comma:\n''').strip()
    a_ = [int(item) for item in user_input.split(''',''')]
    print(exchange_sort(unsorted))
| 48
|
"""simple docstring"""
def binary_insertion_sort(collection):
    """Binary insertion sort: locate each element's slot with binary search,
    shift the sorted prefix right, and insert. Sorts in place and returns
    the list.

    Fixes: the def name never matched the ``binary_insertion_sort(unsorted)``
    call site; the shift loop's range arguments were the list itself and its
    writes went to throwaway locals, so the list was never sorted.
    """
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search for the insertion point of val in collection[:i]
        # (inserts after equal elements, keeping the sort stable).
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift collection[low:i] one slot right and drop val into place.
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    # NOTE(review): ``a_`` is rebound; ``user_input``/``unsorted`` below are
    # never bound — restore the original variable names before running.
    a_ = input('''Enter numbers separated by a comma:\n''').strip()
    a_ = [int(item) for item in user_input.split(''',''')]
    print(binary_insertion_sort(unsorted))
| 48
| 1
|
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a_ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
a_ = importlib.util.spec_from_file_location(
'''transformers''',
os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
a_ = spec.loader.load_module()
a_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a_ = re.compile('''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
a_ = {
'''CLIPConfigMixin''',
'''DecisionTransformerConfigMixin''',
'''EncoderDecoderConfigMixin''',
'''RagConfigMixin''',
'''SpeechEncoderDecoderConfigMixin''',
'''VisionEncoderDecoderConfigMixin''',
'''VisionTextDualEncoderConfigMixin''',
}
def check_config_docstrings_have_checkpoints():
    """Verify every config class docstring links a valid checkpoint.

    A link is valid when it has the form `[name](https://huggingface.co/name)`.
    Raises ValueError listing the offending config classes, if any.

    NOTE(review): restored from a name-mangled original (locals were all
    assigned to ``snake_case_``); the function name comes from the in-file
    ``__main__`` caller.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}')
if __name__ == "__main__":
    # Re-indented guard body (indentation was stripped in the original).
    check_config_docstrings_have_checkpoints()
| 48
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase(ProcessorMixin):
    """Processor combining a Chinese-CLIP image processor and a BERT tokenizer.

    NOTE(review): heavily name-mangled original. The parent class is
    restored to `ProcessorMixin` (imported above); duplicated `lowercase__`
    parameter names (a SyntaxError) and the quadruply-shadowed
    `__UpperCamelCase` method names are restored to the standard
    `ProcessorMixin` contract, evidenced by the method bodies
    (`self.tokenizer.batch_decode`, `self.image_processor_class`, ...).
    """

    # ProcessorMixin contract: sub-processors wrapped by this class and the
    # classes used to auto-load them.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # `image_processor` wins; fall back to the deprecated kwarg.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        # presumably `current_processor` — mangled target; TODO confirm
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        Returns the tokenizer encoding (with `pixel_values` merged in when
        images are given), or a `BatchEncoding` of image features only.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
| 48
| 1
|
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    """Histogram stretching for a grayscale image.

    NOTE(review): reconstructed from a name-mangled original with stripped
    indentation. Class and method names come from the in-file driver
    (`ConstantStretch()`, `.stretch()`, `.plot_histogram()`, `.show_image()`);
    attribute targets were inferred from the reads in `stretch` — confirm
    against the upstream algorithm source. `cva` is presumably `cv2`
    (mangled import) — TODO confirm.
    """

    def __init__(self):
        self.img = ""              # working image (array once loaded)
        self.original_image = ""   # untouched copy for side-by-side display
        self.last_list = []        # per-intensity remapping table
        self.rem = 0               # rounded remapped intensity
        self.L = 256               # number of grey levels
        self.sk = 0                # cumulative-distribution accumulator
        self.k = 0                 # total pixel count (histogram sum)
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        """Build the intensity remap table and rewrite the image in place."""
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                # NOTE(review): `last % last` is always 0 — preserved oddity.
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        """Plot the histogram of the (possibly stretched) working image."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """Display output and input images for 5 s, then close the windows."""
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
if __name__ == "__main__":
    # NOTE(review): restored mangled targets (`a_`) to the names read below
    # (`file_path`, `stretcher`), and re-indented the guard body.
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 48
|
"""simple docstring"""
import argparse
import copy
def generate_neighbours(path):
    """Parse an edge-list file into an adjacency dictionary.

    Each line of the file is `node_a node_b distance`. Returns a dict
    mapping each node to a list of `[neighbour, distance]` pairs; both
    directions of every edge are recorded. Distances stay strings, as in
    the input.

    NOTE(review): restored from a name-mangled original; the function name
    comes from the in-file call `generate_neighbours(args.File)`.
    """
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            parts = line.split()
            # edge a -> b
            if parts[0] not in dict_of_neighbours:
                dict_of_neighbours[parts[0]] = [[parts[1], parts[2]]]
            else:
                dict_of_neighbours[parts[0]].append([parts[1], parts[2]])
            # edge b -> a
            if parts[1] not in dict_of_neighbours:
                dict_of_neighbours[parts[1]] = [[parts[0], parts[2]]]
            else:
                dict_of_neighbours[parts[1]].append([parts[0], parts[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour starting tour for tabu search.

    The start node is the first character of the file at `path`. Returns
    `(first_solution, distance_of_first_solution)`, where the tour starts
    and ends at the start node.

    NOTE(review): restored from a name-mangled original; the 10000 sentinel
    (assumed larger than any edge weight) and the closing-edge correction
    are preserved from the original algorithm.
    """
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        # Pick the nearest not-yet-visited neighbour (sentinel 10000).
        best_distance = 10000
        for neighbour in dict_of_neighbours[visiting]:
            if int(neighbour[1]) < int(best_distance) and neighbour[0] not in first_solution:
                best_distance = neighbour[1]
                best_node = neighbour[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(best_distance)
        visiting = best_node

    first_solution.append(end_node)

    # Close the tour: the last hop added the 10000 sentinel; replace it with
    # the real edge from the final node back to the start node.
    position = 0
    for neighbour in dict_of_neighbours[first_solution[-2]]:
        if neighbour[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate all 2-swap neighbours of `solution`, sorted by tour length.

    Each neighbour is the tour with two distinct interior nodes exchanged
    (endpoints are never moved), with its total distance appended as the
    last list element. Duplicates are removed.

    NOTE(review): restored from a name-mangled original; the function name
    comes from the in-file call in `tabu_search`.
    """
    neighborhood_of_solution = []

    for node in solution[1:-1]:
        idx_node = solution.index(node)
        for other in solution[1:-1]:
            idx_other = solution.index(other)
            if node == other:
                continue

            candidate = copy.deepcopy(solution)
            candidate[idx_node] = other
            candidate[idx_other] = node

            distance = 0
            for k in candidate[:-1]:
                # NOTE(review): `.index` finds the FIRST occurrence, so a
                # tour that revisits a node pairs it with the wrong
                # successor — preserved from the original algorithm.
                next_node = candidate[candidate.index(k) + 1]
                for edge in dict_of_neighbours[k]:
                    if edge[0] == next_node:
                        distance = distance + int(edge[1])
            candidate.append(distance)

            if candidate not in neighborhood_of_solution:
                neighborhood_of_solution.append(candidate)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list of `size`.

    Starting from `first_solution` (cost `distance_of_first_solution`),
    each iteration moves to the best non-tabu 2-swap neighbour, records the
    swapped node pair in the tabu list, and tracks the best tour seen.
    Returns `(best_solution_ever, best_cost)`.

    NOTE(review): restored from a name-mangled original; name comes from
    the in-file call in `main`. `first_exchange_node` is unbound if a
    neighbour equals the current solution — preserved from the original.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1  # distance is the last element

        found = False
        while not found:
            # First position where the candidate differs from the current
            # solution identifies the pair of exchanged nodes.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # strip the appended distance
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Candidate move is tabu: try the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    """CLI entry point: build the graph, seed a greedy tour, run tabu search.

    `args` must expose `File`, `Iterations` and `Size` (argparse namespace).
    NOTE(review): restored from a name-mangled original; name comes from
    the in-file `main(parser.parse_args())` call.
    """
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f'Best solution: {best_sol}, with total distance: {best_cost}.')
if __name__ == "__main__":
    # NOTE(review): restored mangled target (`a_`) to `parser`, which the
    # following lines read, and re-indented the guard body.
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
| 48
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: submodule name -> public symbols it provides.
# NOTE(review): restored mangled `a_ = [...]` assignments to
# `_import_structure[...] = [...]`; the `_LazyModule(...)` call at the end
# already reads `_import_structure`, pinning the intended name. Indentation
# of the try/else blocks was stripped in the original and is restored here.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so heavy frameworks are
    # only imported when one of their symbols is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
# Shared docstring for the RAG configuration, injected by the decorator below.
# NOTE(review): renamed from the mangled `a_` so the decorator can reference it.
RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
            [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`)
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`)
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*)
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`)
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved(`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""


@add_start_docstrings(RAG_CONFIG_DOC)
class __lowercase(PretrainedConfig):
    """RAG composite configuration: wraps a question-encoder config and a
    generator config.

    NOTE(review): restored from a name-mangled original — the parent class
    is `PretrainedConfig` (imported above), duplicated `lowercase__`
    parameter names (a SyntaxError) are restored from their use in the
    body, and the shadowed `__UpperCamelCase` method names follow the
    standard `PretrainedConfig` contract.
    """

    model_type = "rag"
    is_composition = True  # presumably — mangled class attribute; TODO confirm

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported here to avoid a circular import with the auto configs.
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        # Inherit the generator's forced EOS token when none was given.
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        """Instantiate from separate question-encoder / generator configs."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a dict, expanding the two nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 48
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for the (deprecated) VAN model.
# NOTE(review): restored mangled `a_` targets to `_import_structure`, which
# the `_LazyModule(...)` call at the end already reads; indentation of the
# try/else blocks was stripped in the original and is restored here.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    # Replace the module with a lazy proxy; torch is imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)  # restored from mangled `a_`; read as `logger` below
class __lowercase(PretrainedConfig):
    """Configuration for a UPerNet semantic-segmentation model.

    Stores the backbone config plus decode/auxiliary-head hyperparameters.
    NOTE(review): restored from a name-mangled original — parent class is
    `PretrainedConfig` (imported above); parameter names come from the
    attribute assignments in the body.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # mutable default kept for config-serialization compatibility
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # A plain dict: resolve the concrete config class via its model_type.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 48
| 1
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput):
    """Output of the Flax UNet2D conditional model.

    `sample` is the predicted noise residual, shape (batch, channels,
    height, width). NOTE(review): class renamed from the mangled
    `__lowercase` to match the in-file reference at the end of
    `__call__`; parent restored to `BaseOutput` (imported above).
    """

    sample: jnp.ndarray
@flax_register_to_config
class __lowercase(nn.Module, FlaxModelMixin, ConfigMixin):
    """Conditional 2D UNet (Flax) that denoises a latent `sample` given a
    timestep and encoder hidden states.

    NOTE(review): restored from a name-mangled original. The dataclass
    fields were all named `_A` (shadowing each other) while the bodies read
    `self.sample_size`, `self.in_channels`, etc., so the field names are
    recovered from those reads; `setup` and `init_weights` names are
    required by flax `nn.Module` / `FlaxModelMixin`; `jnp.floataa` /
    `jnp.intaa` are nonexistent APIs restored to `jnp.float32` /
    `jnp.int32`. Mixin parents restored from the in-file imports.
    """

    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng):
        """Initialize parameters from dummy zero inputs; returns `params`."""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )
            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ):
        """Run the UNet. Returns a `(sample,)` tuple when `return_dict` is
        False, otherwise `FlaxUNetaDConditionOutput`."""
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process: NCHW -> NHWC for flax convolutions
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            # ControlNet-style residuals added to the skip connections.
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up — consume the skip connections in reverse order
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlockaD):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process: NHWC -> NCHW
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNetaDConditionOutput(sample=sample)
| 48
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
# Module-level logger; the code below refers to it as `logger`.
logger = logging.getLogger(__name__)
class __lowercase ( _UpperCAmelCase):
    """CoNLL-style NER task: one token per line, label in column ``label_idx``.

    NOTE(review): the three methods below all share the name
    ``__UpperCamelCase`` (only the last survives on the class); names are kept
    as-is to preserve the existing interface.
    """

    def __init__(self, label_idx=-1):
        # In NER datasets, the last column is usually reserved for the NER label.
        self.label_idx = label_idx

    def __UpperCamelCase(self, data_dir, mode):
        """Read ``{mode}.txt`` from ``data_dir`` and return a list of InputExample."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    # Blank line / doc marker ends the current sentence.
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
        return examples

    def __UpperCamelCase(self, writer, test_input_reader, preds_list):
        """Write predictions next to the original tokens of the test file."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def __UpperCamelCase(self, path):
        """Return the label list from ``path``, else the CoNLL-2003 default set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowercase ( _UpperCAmelCase):
    """Chunking variant of the NER task: in CoNLL-2003 files the chunk tag
    lives in the second-to-last column."""

    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def __UpperCamelCase(self, lowercase__):
        """Return the chunk label list, read from ``lowercase__`` when given."""
        if not lowercase__:
            # Default CoNLL-2003 chunk tag set.
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
        with open(lowercase__, "r") as f:
            labels = f.read().splitlines()
        return labels if "O" in labels else ["O"] + labels
class __lowercase ( _UpperCAmelCase):
    """Part-of-speech tagging task backed by CoNLL-U files (parsed via `conllu`).

    NOTE(review): the three methods share the name ``__UpperCamelCase`` (only
    the last survives); kept as-is to preserve the existing interface.
    """

    def __UpperCamelCase(self, data_dir, mode):
        """Read ``{mode}.txt`` (CoNLL-U) and return InputExamples with UPOS labels."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                    guid_index += 1
        return examples

    def __UpperCamelCase(self, writer, test_input_reader, preds_list):
        """Write ``form (upos|prediction)`` lines for each sentence of the reader."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def __UpperCamelCase(self, path):
        """Return the UPOS tag set, read from ``path`` when provided."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 48
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
# The functions below refer to these module-level names directly.
logger = logging.get_logger(__name__)

# fairseq -> transformers weight-name mapping; "*" is a layer-index wildcard.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top level of the HF model (not under the encoder).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the attribute of `hf_pointer` addressed by dotted `key`.

    `weight_type` selects which tensor of the resolved module is written
    ("weight", "weight_g", "weight_v", "bias", or None for the module itself).
    `full_name` is only used in assert/log messages.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Port fairseq wav2vec2 encoder weights into `hf_model`.

    Returns the encoder->decoder projection module (or None when the
    checkpoint has none); unmapped tensors are logged as unused.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched key.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv feature-extractor tensor into the HF extractor.

    Tensor names look like ``conv_layers.<layer_id>.<type_id>.{weight,bias}``;
    type 0 is the conv itself, type 2 a layer/group norm.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares its weight with `emb`."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share (not copy) the embedding weights with the output projection.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """Build a token->id vocab from a fairseq dict file.

    Each line is ``<token> <count>``; ids 0-3 are reserved for the special
    tokens, real tokens start at 4 in file order.
    """
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Convert a fairseq wav2vec2 + speech-to-text checkpoint into a
    transformers SpeechEncoderDecoderModel and save it (model, tokenizer and
    feature extractor) under `pytorch_dump_folder_path`.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=1_6_0_0_0,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}')
    logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}')
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    # presumably these tag the processor classes for AutoProcessor — TODO confirm
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument(
        '--encoder_config_path',
        default='facebook/wav2vec2-large-lv60',
        type=str,
        help='Path to hf encoder wav2vec2 checkpoint config',
    )
    parser.add_argument(
        '--decoder_config_path',
        default='facebook/s2t-small-mustc-en-fr-st',
        type=str,
        help='Path to hf decoder s2t checkpoint config',
    )
    parser.add_argument('--vocab_size', default=10224, type=int, help='Vocab size of decoder')
    parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
| 48
|
"""simple docstring"""
import random
def rabin_miller(num: int) -> bool:
    """Miller-Rabin probabilistic primality test with 5 random rounds.

    Never rejects a true prime; composites slip through with probability at
    most (1/4)**5 per call. Callers filter out small and even numbers first
    (the decomposition below requires an odd `num`).
    """
    s = num - 1
    t = 0
    # Write num - 1 as s * 2**t with s odd.
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Primality check: trial-divide by every prime below 1000, then fall
    back to `rabin_miller` for larger survivors."""
    if num < 2:
        return False
    # All primes below 1000, generated with a sieve of Eratosthenes
    # (same set as the previous hard-coded table).
    sieve = [True] * 1000
    sieve[0] = sieve[1] = False
    for p in range(2, 32):  # 31 >= sqrt(999)
        if sieve[p]:
            for multiple in range(p * p, 1000, p):
                sieve[multiple] = False
    low_primes = [p for p in range(2, 1000) if sieve[p]]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1_0_2_4) -> int:
    """Return a random prime with `keysize` bits (rejection sampling)."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    # Demo: generate one large prime and re-verify it.
    num = generate_large_prime()
    print(('Prime number:', num))
    print(('is_prime_low_num:', is_prime_low_num(num)))
| 48
| 1
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class __lowercase :
    """Mixin with (de)serialization round-trip tests for feature extractors.

    Consumers must provide `feature_extraction_class` and `feat_extract_dict`.
    NOTE(review): all four test methods share the name ``__UpperCamelCase``
    (only the last survives on the class); kept as-is to preserve the
    existing interface.
    """

    _A : Tuple = None

    def __UpperCamelCase(self):
        # to_json_string round-trip: every configured key must survive.
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def __UpperCamelCase(self):
        # to_json_file / from_json_file round-trip.
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def __UpperCamelCase(self):
        # save_pretrained / from_pretrained round-trip.
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def __UpperCamelCase(self):
        # The extractor must be constructible with no arguments.
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 48
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

# Public checkpoint names mapped to their hosted config URLs.
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}
class __lowercase ( _UpperCAmelCase):
    """Configuration class for DeBERTa-v2 models (``model_type`` "deberta-v2")."""

    _A : Dict = "deberta-v2"

    def __init__(
        self,
        vocab_size=12_81_00,
        hidden_size=15_36,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=61_44,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility: accept "p2c|c2p"-style strings.
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class __lowercase ( _UpperCAmelCase):
    """ONNX export configuration for DeBERTa-v2."""

    @property
    def __UpperCamelCase(self):
        # Dynamic axes depend on the task; token_type_ids are only exported
        # when the model actually uses segment embeddings.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def __UpperCamelCase(self):
        # Minimum ONNX opset required by the exported graph.
        return 12

    def __UpperCamelCase(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        num_choices=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=40,
        image_height=40,
        tokenizer=None,
    ):
        """Build dummy inputs, dropping token_type_ids when segments are unused."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 48
| 1
|
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
# Module-level logger; the pipeline below refers to it as `logger`.
logger = logging.get_logger(__name__)
class __lowercase :
    """One chat: past user inputs, generated responses, and the pending
    (not yet processed) new user input."""

    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuida()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        # NOTE(review): `type(self)` stands in for the class name, which
        # would be name-mangled inside this (underscore-named) class body.
        if not isinstance(other, type(self)):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def __UpperCamelCase(self, text, overwrite=False):
        """Queue `text` as the new user input; warn if one is already pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def __UpperCamelCase(self):
        """Move the pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def __UpperCamelCase(self, response):
        """Append a model-generated response to the history."""
        self.generated_responses.append(response)

    def __UpperCamelCase(self):
        """Yield (is_user, text) pairs in chronological order."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f'{name} >> {text} \n'
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
    min_length_for_response (`int`, *optional*, defaults to 32):
        The minimum length (in number of tokens) for a response.
    minimum_tokens (`int`, *optional*, defaults to 10):
        The minimum length of tokens to leave for a response.
    """,
)
class __lowercase ( _UpperCAmelCase):
    """Multi-turn conversational pipeline: consumes Conversation objects,
    generates a response and folds it back into the conversation.

    NOTE(review): several methods share the name ``__UpperCamelCase`` (only
    the last survives on the class); kept as-is to preserve the interface.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Generation needs a pad token; fall back to EOS when none is set.
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def __UpperCamelCase(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        """Split call-time kwargs into preprocess/forward/postprocess params."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        """Run the pipeline; unwrap single-element result lists."""
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def __UpperCamelCase(self, conversation, min_length_for_response=32):
        """Tokenize a conversation into model inputs."""
        # NOTE(review): duck-typed instead of `isinstance(conversation, Conversation)`
        # because the Conversation class in this file carries a mangled
        # (underscore) name that cannot be referenced from this class body.
        if not hasattr(conversation, "new_user_input"):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
                """Add user inputs with the conversation's `add_user_input` method"""
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def __UpperCamelCase(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        """Generate a response, trimming history to leave `minimum_tokens` room."""
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            # Encoder-decoder outputs start with the decoder start token.
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def __UpperCamelCase(self, model_outputs, clean_up_tokenization_spaces=True):
        """Decode the generated ids and fold the answer into the conversation."""
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def __UpperCamelCase(self, conversation):
        """Fallback tokenization: encode each turn, EOS-separated, left-truncated."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 48
|
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """Return True when `num1` and `num2` have opposite signs.

    XOR of two ints is negative exactly when their sign bits differ
    (`^` binds tighter than `<`, so this is `(num1 ^ num2) < 0`).
    """
    return num1 ^ num2 < 0
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 48
| 1
|
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __lowercase ( _UpperCAmelCase):
    """Descriptor that caches a property's first computed value on the
    instance (under ``__cached_<name>``) and serves the cache afterwards."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a truthy/falsy string to 1/0 (mirrors distutils.util.strtobool).

    Raises ValueError for anything unrecognised.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f'invalid truth value {val!r}')
def is_tensor(x):
    """Return True if `x` is a torch/TF/JAX/numpy tensor or a torch.fx proxy."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
return isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
return _is_numpy(SCREAMING_SNAKE_CASE__ )
def _is_torch(x):
    """True when ``x`` is a ``torch.Tensor`` (caller guarantees torch exists)."""
    import torch

    return isinstance(x, torch.Tensor)


def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] ):
    """Availability-safe check for ``torch.Tensor``.

    Fixes: the wrapper called ``_is_torch``, which did not exist because the
    helper's name had been obfuscated away.
    """
    return False if not is_torch_available() else _is_torch(SCREAMING_SNAKE_CASE__)
def _is_torch_device(x):
    """True when ``x`` is a ``torch.device`` (caller guarantees torch exists)."""
    import torch

    return isinstance(x, torch.device)


def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
    """Availability-safe check for ``torch.device``.

    Fixes: the wrapper called the undefined name ``_is_torch_device``.
    """
    return False if not is_torch_available() else _is_torch_device(SCREAMING_SNAKE_CASE__)
def _is_torch_dtype(x):
    """True when ``x`` is a ``torch.dtype`` or the name of one (e.g. "float32")."""
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            # Resolve dtype names like "float32" to the torch.dtype object.
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
    """Availability-safe check for torch dtypes (objects or dtype names).

    Fixes: obfuscation collapsed all names so the helper tested
    ``isinstance(x, x)`` / ``hasattr(x, x)``, which is meaningless, and the
    wrapper called an undefined ``_is_torch_dtype``.
    """
    return False if not is_torch_available() else _is_torch_dtype(SCREAMING_SNAKE_CASE__)
def _is_tensorflow(x):
    """True when ``x`` is a ``tf.Tensor`` (caller guarantees TF exists)."""
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
    """Availability-safe check for ``tf.Tensor``.

    Fixes: the wrapper called the undefined name ``_is_tensorflow``.
    """
    return False if not is_tf_available() else _is_tensorflow(SCREAMING_SNAKE_CASE__)
def _is_tf_symbolic_tensor(x):
    """True when ``x`` is a symbolic (graph-mode) TF tensor."""
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, """is_symbolic_tensor"""):
        return tf.is_symbolic_tensor(x)
    # Fallback for older TF: symbolic tensors are exactly of type tf.Tensor
    # (eager tensors are EagerTensor subclasses), hence the strict type check.
    return type(x) == tf.Tensor


def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
    """Availability-safe check for symbolic TF tensors.

    Fixes: ``hasattr`` was probing the *argument* instead of the ``tf`` module,
    and the wrapper called the undefined name ``_is_tf_symbolic_tensor``.
    """
    return False if not is_tf_available() else _is_tf_symbolic_tensor(SCREAMING_SNAKE_CASE__)
def _is_jax(x):
    """True when ``x`` is a ``jnp.ndarray`` (caller guarantees JAX exists)."""
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
    """Availability-safe check for JAX arrays.

    Fixes: the wrapper called the undefined name ``_is_jax``.
    """
    return False if not is_flax_available() else _is_jax(SCREAMING_SNAKE_CASE__)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple ):
    """Recursively convert tensors / arrays / containers to plain Python objects
    (nested lists, numbers, dicts).

    Fixes: the dict/list comprehensions recursed on the container itself instead
    of its elements, and the recursive call targeted an undefined global name;
    recursion now goes through a local closure so it is immune to the module's
    name collisions.
    """

    def _convert(obj):
        if isinstance(obj, (dict, UserDict)):
            return {k: _convert(v) for k, v in obj.items()}
        elif isinstance(obj, (list, tuple)):
            return [_convert(o) for o in obj]
        elif is_tf_tensor(obj):
            return obj.numpy().tolist()
        elif is_torch_tensor(obj):
            # detach/cpu first so grad-tracking and device tensors convert cleanly
            return obj.detach().cpu().tolist()
        elif is_jax_tensor(obj):
            return np.asarray(obj).tolist()
        elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
            return obj.tolist()
        else:
            return obj

    return _convert(SCREAMING_SNAKE_CASE__)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple ):
    """Recursively convert tensors / containers to numpy arrays.

    Fixes: the dict comprehension recursed on the container instead of the
    values and targeted an undefined global; recursion now uses a local closure.
    """

    def _convert(obj):
        if isinstance(obj, (dict, UserDict)):
            return {k: _convert(v) for k, v in obj.items()}
        elif isinstance(obj, (list, tuple)):
            return np.array(obj)
        elif is_tf_tensor(obj):
            return obj.numpy()
        elif is_torch_tensor(obj):
            return obj.detach().cpu().numpy()
        elif is_jax_tensor(obj):
            return np.asarray(obj)
        else:
            return obj

    return _convert(SCREAMING_SNAKE_CASE__)
class __lowercase ( _UpperCAmelCase):
    """Dict-like base container for model outputs: every non-None dataclass
    field is exposed both as an attribute and as a key, in declaration order.
    Deletion and the mutating dict methods are disabled.

    NOTE(review): upstream this is ``ModelOutput(OrderedDict)``; confirm the
    obfuscated base ``_UpperCAmelCase`` resolves to an OrderedDict subclass.

    Fixes: obfuscation renamed ``__post_init__`` (so the dataclass hook never
    ran), gave four distinct methods the same name (only the last survived),
    read several undefined locals, and collapsed ``self[...] = ...`` item
    assignments into throwaway local bindings.
    """

    def __post_init__(self):
        # Dataclass hook: populate the mapping from the declared fields.
        class_fields = fields(self)
        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f'{self.__class__.__name__} has no fields.' )
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f'{self.__class__.__name__} should not have more than one required field.' )
        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f'Cannot set key/value for {element}. It needs to be a tuple (key, value).' )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            # Regular case: copy every non-None field into the mapping.
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' )

    def setdefault(self, *args, **kwargs):
        raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' )

    def pop(self, *args, **kwargs):
        raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.' )

    def update(self, *args, **kwargs):
        raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.' )

    def __getitem__(self, k):
        # String keys behave like dict access; integer keys index the tuple view.
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Return a tuple of all non-None values, in key order."""
        return tuple(self[k] for k in self.keys())
class __lowercase ( str , Enum):
    """Enum base whose missing-value error message lists the valid choices.

    Fixes: the class listed the same (obfuscated) base twice, which raises a
    ``TypeError`` at class-creation time — upstream the bases are
    ``(str, Enum)`` — and the enum hook ``_missing_`` had lost its name, plus
    ``_value2member_map_`` was misspelled.
    """

    @classmethod
    def _missing_(cls, value):
        # Called by Enum when lookup by value fails; raise a helpful message.
        raise ValueError(
            f'{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}' )
class __lowercase ( _UpperCAmelCase):
    """Padding strategies accepted by tokenizer ``padding=`` arguments.

    Fixes: all three members were named ``_A``, so only the last assignment
    survived; restored the canonical member names the values correspond to.
    """

    LONGEST = """longest"""
    MAX_LENGTH = """max_length"""
    DO_NOT_PAD = """do_not_pad"""
class __lowercase ( _UpperCAmelCase):
    """Tensor frameworks accepted by ``return_tensors=`` arguments.

    Fixes: all four members were named ``_A``, so only the last assignment
    survived; restored the canonical member names the values correspond to.
    """

    PYTORCH = """pt"""
    TENSORFLOW = """tf"""
    NUMPY = """np"""
    JAX = """jax"""
class __lowercase :
    """Enter a whole list of context managers as one ``with`` target, using an
    ``ExitStack`` so they are exited in reverse order.

    Fixes: ``__init__`` bound its arguments to throwaway locals (the attributes
    read by ``__enter__`` were never set), and ``enter_context`` was called on
    an undefined name instead of the loop variable.
    """

    def __init__(self, lowercase__ ):
        # lowercase__: iterable of context managers to enter together.
        self.context_managers = lowercase__
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        # Delegate unwinding to the ExitStack (exits in reverse order).
        self.stack.__exit__(*args, **kwargs)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
    """Return True when the model class's forward/call signature has a
    ``return_loss`` parameter defaulting to True.

    Fixes: the locals ``framework``/``signature`` were assigned to throwaway
    names and then read as undefined globals.
    """
    model_class = SCREAMING_SNAKE_CASE__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
    """List the label-like parameter names of a model class's forward/call
    signature (anything containing "label", plus QA start/end positions).

    Fixes: the locals ``model_name``/``framework``/``signature`` were assigned
    to throwaway names and then read as undefined globals.
    """
    model_class = SCREAMING_SNAKE_CASE__
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def SCREAMING_SNAKE_CASE__ ( d : MutableMapping , parent_key : str = "" , delimiter : str = "." ):
    """Flatten a nested mapping into a single-level dict with delimiter-joined keys.

    e.g. ``{"a": {"b": 1}}`` -> ``{"a.b": 1}``. Empty sub-mappings are kept as
    values (the ``v and isinstance(...)`` guard).

    Fixes: all three parameters shared one name (a SyntaxError), the recursive
    call targeted the wrong function, and the built key was discarded.
    """

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                # Recurse through the generator itself to avoid relying on a
                # module-level name.
                yield from _flatten_dict(v, key, delimiter)
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def SCREAMING_SNAKE_CASE__ ( working_dir , use_temp_dir : bool = False ):
    """Context manager yielding either the given working directory or a fresh
    temporary directory (deleted on exit) when ``use_temp_dir`` is True.

    Fixes: both parameters shared one name (a SyntaxError) and the body read
    the intended-but-undefined names.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def SCREAMING_SNAKE_CASE__ ( array , axes=None ):
    """Framework-agnostic transpose for numpy / torch / TF / JAX arrays.

    Fixes: both parameters shared one name (a SyntaxError) and the body read
    the intended-but-undefined ``array``/``axes``.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        # torch has no axes= transpose; .T for the default, permute otherwise.
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f'Type not supported for transpose: {type(array)}.' )
def SCREAMING_SNAKE_CASE__ ( array , newshape ):
    """Framework-agnostic reshape for numpy / torch / TF / JAX arrays.

    Fixes: both parameters shared one name (a SyntaxError) and the body read
    intended-but-undefined names.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f'Type not supported for reshape: {type(array)}.' )
def SCREAMING_SNAKE_CASE__ ( array , axis=None ):
    """Framework-agnostic squeeze for numpy / torch / TF / JAX arrays.

    Fixes: both parameters shared one name (a SyntaxError) and the body read
    intended-but-undefined names.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        # torch uses dim= rather than axis=.
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f'Type not supported for squeeze: {type(array)}.' )
def SCREAMING_SNAKE_CASE__ ( array , axis ):
    """Framework-agnostic expand_dims for numpy / torch / TF / JAX arrays.

    Fixes: both parameters shared one name (a SyntaxError) and the body read
    intended-but-undefined names.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f'Type not supported for expand_dims: {type(array)}.' )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
    """Return the total number of elements of a numpy / torch / TF / JAX array.

    Fixes: the unsupported-type error message said "expand_dims" (copy-paste
    from the function above) instead of naming this operation.
    """
    array = SCREAMING_SNAKE_CASE__
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f'Type not supported for tensor_size: {type(array)}.' )
def SCREAMING_SNAKE_CASE__ ( auto_map , repo_id ):
    """Prefix every entry of an ``auto_map`` with ``repo_id--`` unless it is
    None or already carries a ``--`` repo prefix. Mutates and returns the map.

    Fixes: both parameters shared one name (a SyntaxError) and the rewritten
    values were bound to throwaway locals instead of written back into the map.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f'{repo_id}--{v}' if (v is not None and """--""" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f'{repo_id}--{value}'
    return auto_map
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple ):
"""simple docstring"""
for base_class in inspect.getmro(SCREAMING_SNAKE_CASE__ ):
snake_case_ : Dict = base_class.__module__
snake_case_ : int = base_class.__name__
if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'Could not infer framework from class {model_class}.' )
| 48
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): obfuscation collapsed several distinct module constants into the
# single name `a_`; each assignment below rebinds it, so only the final value
# survives at import time. The test class below reads `ORG_NAME` (currently
# undefined) — the constants need their original names restored in one pass.
a_ = get_tests_dir('''fixtures/test_sentencepiece.model''')  # sample sentencepiece vocab path
a_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}  # mock tokenizer config fixture
a_ = '''>>zh<<'''  # language-code control token
a_ = '''Helsinki-NLP/'''  # hub organization prefix (presumably `ORG_NAME`)
# Preferred tensor framework for the tests, by availability.
if is_torch_available():
    a_ = '''pt'''
elif is_tf_available():
    a_ = '''tf'''
else:
    a_ = '''jax'''
@require_sentencepiece
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
    """Tokenizer tests for MarianTokenizer (sentencepiece-backed).

    NOTE(review): obfuscation renamed the class, its mixin base, every test
    method (all to ``__UpperCamelCase``, so only the last definition of each
    name survives) and the three class attributes (all to ``_A``); several
    method bodies also read names (``save_dir``, ``tokenizer``, ``batch``,
    ``tok``, ``batch_smaller``, ``ORG_NAME`` ...) that the throwaway
    ``snake_case_`` bindings never define. Restore names before relying on
    these tests.
    """

    # Presumably: tokenizer_class / test_rust_tokenizer / test_sentencepiece.
    _A : str = MarianTokenizer
    _A : List[str] = False
    _A : List[str] = True

    def __UpperCamelCase (self ):
        # setUp: build a tiny sentencepiece tokenizer fixture in tmpdirname.
        super().setUp()
        snake_case_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        snake_case_ : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
        snake_case_ : Any = Path(self.tmpdirname )
        # Write vocab + config, then copy the sentencepiece models into place.
        save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        snake_case_ : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def __UpperCamelCase (self , **lowercase__ ):
        # get_tokenizer helper: load the fixture tokenizer with overrides.
        return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )

    def __UpperCamelCase (self , lowercase__ ):
        # get_input_output_texts helper used by the common tokenizer tests.
        return (
            "This is a test",
            "This is a test",
        )

    def __UpperCamelCase (self ):
        # Token "</s>" maps to id 0 in both directions.
        snake_case_ : Union[str, Any] = """</s>"""
        snake_case_ : Tuple = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )

    def __UpperCamelCase (self ):
        # Vocab ordering and size of the 9-token fixture.
        snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(lowercase__ ) , 9 )

    def __UpperCamelCase (self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )

    def __UpperCamelCase (self ):
        # Round-trip an en-de hub tokenizer through save_pretrained.
        snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
        snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0]
        self.assertListEqual(lowercase__ , batch.input_ids[0] )
        snake_case_ : Tuple = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(lowercase__ )
        snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )]
        self.assertIn("""source.spm""" , lowercase__ )
        MarianTokenizer.from_pretrained(lowercase__ )

    def __UpperCamelCase (self ):
        # Long input should be truncated/padded to the model max length (512).
        snake_case_ : Union[str, Any] = self.get_tokenizer()
        snake_case_ : List[str] = tok(
            ["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(batch.input_ids.shape , (2, 5_12) )

    def __UpperCamelCase (self ):
        # Without truncation, padding goes to the longest sequence only.
        snake_case_ : Tuple = self.get_tokenizer()
        snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )

    @slow
    def __UpperCamelCase (self ):
        # Integration test against a pinned hub revision with a golden encoding.
        # fmt: off
        snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 
5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )

    def __UpperCamelCase (self ):
        # Separate source/target vocabs: encode, encode-as-target, then decode.
        snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        snake_case_ : Dict = """Tämä on testi"""
        snake_case_ : List[Any] = """This is a test"""
        snake_case_ : Optional[int] = [76, 7, 20_47, 2]
        snake_case_ : List[str] = [69, 12, 11, 9_40, 2]
        snake_case_ : Any = tokenizer(lowercase__ ).input_ids
        self.assertListEqual(lowercase__ , lowercase__ )
        snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids
        self.assertListEqual(lowercase__ , lowercase__ )
        snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
        self.assertEqual(lowercase__ , lowercase__ )
| 48
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def SCREAMING_SNAKE_CASE__ ( string1 : str , string2 : str ):
    """Compare two equal-length binary strings; if they differ in exactly one
    position return the merged string with "_" at that position, if they are
    identical return the string unchanged, otherwise return False.

    Fixes: both parameters shared one name (a SyntaxError) and both working
    lists were bound to the same obfuscated name.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            # Mark the differing bit as a don't-care.
            list1[i] = """_"""
    if count > 1:
        return False
    else:
        return "".join(list1)
def SCREAMING_SNAKE_CASE__ ( binary : list[str] ):
    """Quine-McCluskey merge step: repeatedly combine terms differing in one
    bit until no more merges are possible; return the prime implicants.

    Fixes: the merge condition was inverted (`k is False`) and the merged term
    was discarded in favour of a literal "X", so no combining ever happened;
    the marker list and result list were also bound to undefined names.
    """
    pi = []
    while True:
        # "$" marks a term that merged with nothing this round.
        check1 = ["""$"""] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # Both terms were absorbed into the merged term k.
                    check1[i] = """*"""
                    check1[j] = """*"""
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        # Next round works on the (deduplicated) merged terms.
        binary = list(set(temp))
def SCREAMING_SNAKE_CASE__ ( no_of_variable : int , minterms ):
    """Render each minterm as a fixed-width binary string of ``no_of_variable``
    bits (most significant bit first).

    Fixes: both parameters shared one name (a SyntaxError) and the accumulator
    string was read under an undefined name.
    """
    temp = []
    for minterm in minterms:
        string = """"""
        for _ in range(no_of_variable):
            # Prepend the low bit, then shift right.
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def SCREAMING_SNAKE_CASE__ ( string1 : str , string2 : str , count : int ):
    """Return True when the two strings differ in exactly ``count`` positions
    (used to match a prime implicant's don't-cares against a minterm).

    Fixes: all three parameters shared one name (a SyntaxError) and both
    working lists were bound to the same obfuscated name.
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def SCREAMING_SNAKE_CASE__ ( chart , prime_implicants ):
    """Pick a covering set of prime implicants from the coverage chart.

    ``chart[i][j] == 1`` means implicant i covers minterm j. First take every
    essential implicant (sole cover of some minterm), zeroing the columns it
    covers; then greedily take the implicant covering the most remaining
    minterms until the chart is empty.

    Fixes: both parameters shared one name (a SyntaxError), and the
    column-zeroing writes had been collapsed into no-op local bindings, so
    nothing was ever removed from the chart.
    """
    temp = []
    select = [0] * len(chart)
    # Mark essential implicants: columns covered by exactly one row.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Take essentials and zero every column they cover.
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy cover for whatever remains.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def SCREAMING_SNAKE_CASE__ ( prime_implicants : list[str] , binary : list[str] ):
    """Build the coverage chart: ``chart[i][j] == 1`` iff prime implicant i
    covers minterm j (they differ exactly in the implicant's "_" positions).

    Fixes: both parameters shared one name (a SyntaxError), and the cell
    writes had been collapsed into no-op local bindings so the chart stayed
    all-zero.
    """
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        # Number of don't-care positions in this implicant.
        count = prime_implicants[i].count("""_""")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def SCREAMING_SNAKE_CASE__ ( ):
    """Interactive driver: read variable count and minterms, then print the
    prime implicants and the essential prime implicants.

    Fixes: minterms were parsed as ``float``, which makes
    ``str(minterm % 2)`` produce "1.0"-style bits downstream; they are
    integers.
    """
    no_of_variable = int(input("""Enter the no. of variables\n""" ) )
    minterms = [
        int(x)
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("""Prime Implicants are:""" )
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("""Essential Prime Implicants are:""" )
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Call the driver by its actual (obfuscated) name; `main` is undefined here.
    SCREAMING_SNAKE_CASE__()
| 48
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class __lowercase ( _UpperCAmelCase):
    """Task template for automatic speech recognition: maps a dataset's audio
    column to the "audio" feature and a transcription column to
    "transcription".

    Fixes: ``frozen=`` was bound to an undefined name (it is a boolean flag);
    all five fields were named ``_A`` (only the last survived) although the
    methods read ``self.audio_column`` etc.; the two methods shared one name
    so the alignment method was lost; and the updated schema was never written
    into the copied template.
    """

    task: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
    input_schema: ClassVar[Features] = Features({"""audio""": Audio()})
    label_schema: ClassVar[Features] = Features({"""transcription""": Value("""string""")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """Return a copy of this template whose input schema uses the dataset's
        actual Audio feature for ``self.audio_column``."""
        if self.audio_column not in features:
            raise ValueError(f'Column {self.audio_column} is not present in features.' )
        if not isinstance(features[self.audio_column] , Audio):
            raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["""audio"""] = features[self.audio_column]
        # The dataclass is frozen, so write through __dict__.
        task_template.__dict__["""input_schema"""] = input_schema
        return task_template

    @property
    def column_mapping(self):
        # Dataset column -> schema feature name.
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 48
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
    """GLPN-style image processor: optionally resizes each image *down* to the
    nearest multiple of ``size_divisor`` and rescales pixel values by 1/255.

    Fixes: every method declared several parameters with the same obfuscated
    name (SyntaxErrors), ``__init__`` bound its arguments to throwaway locals
    instead of instance attributes, and the three methods all shared one name
    so only the last survived.
    """

    model_input_names = ["""pixel_values"""]

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs):
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        """Resize so height and width are multiples of ``size_divisor``."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # `resize` here is the module-level transform, not this method.
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, do_rescale=None, size_divisor=None, resample=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        """Run the configured pipeline over one image or a batch and return a
        ``BatchFeature`` with "pixel_values"."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("""size_divisor is required for resizing""" )
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("""Invalid image(s)""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 2_55) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 48
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
    """PoolFormer-style image processor: resize (optionally scaled by
    ``crop_pct``), center-crop, rescale and normalize a batch of images.

    NOTE(review): identifiers in this block were machine-mangled.  Every
    positional parameter is the literal name ``lowercase__`` (duplicate
    parameter names are a SyntaxError), every method shares the name
    ``__UpperCamelCase`` (each definition shadows the previous one), and the
    bodies read the original names (``size``, ``crop_pct``, ``images`` ...)
    which are unbound here.  Comments document the intended behaviour only.
    """

    # Keys this processor produces in its output BatchFeature.
    _A : int = ["""pixel_values"""]

    def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
        # Defaults: 224 shortest edge for resize, 224x224 center crop,
        # ImageNet mean/std for normalisation.
        super().__init__(**lowercase__ )
        snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24}
        snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
        snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" )
        snake_case_ : Union[str, Any] = do_resize
        snake_case_ : List[str] = size
        snake_case_ : str = crop_pct
        snake_case_ : str = resample
        snake_case_ : Optional[Any] = do_center_crop
        snake_case_ : Dict = crop_size
        snake_case_ : int = do_rescale
        snake_case_ : Optional[int] = rescale_factor
        snake_case_ : str = do_normalize
        snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
        # Resize.  When crop_pct is given the target size is first inflated
        # by 1/crop_pct so that a later center-crop lands on the nominal size.
        snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
        if crop_pct is not None:
            if "shortest_edge" in size:
                snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct )
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    snake_case_ : Dict = int(size["""height"""] / crop_pct )
                else:
                    snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
            else:
                raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
            snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ )
        else:
            if "shortest_edge" in size:
                snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ )
            elif "height" in size and "width" in size:
                snake_case_ : int = (size["""height"""], size["""width"""])
            else:
                raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
        return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        # Center-crop to exactly {height, width}.
        snake_case_ : int = get_size_dict(lowercase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
        return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ )

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        # Multiply pixel values by a scalar (typically 1/255).
        return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        # Channel-wise (x - mean) / std normalisation.
        return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
        # Full pipeline: validate options, then resize -> center-crop ->
        # rescale -> normalize, and pack the result into a BatchFeature.
        snake_case_ : str = do_resize if do_resize is not None else self.do_resize
        snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct
        snake_case_ : List[Any] = resample if resample is not None else self.resample
        snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize
        snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean
        snake_case_ : int = image_std if image_std is not None else self.image_std
        snake_case_ : List[Any] = size if size is not None else self.size
        snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
        snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size
        snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" )
        snake_case_ : List[str] = make_list_of_images(lowercase__ )
        if not valid_images(lowercase__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_center_crop and crop_pct is None:
            raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images]
        if do_resize:
            snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images]
        if do_center_crop:
            snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
        if do_rescale:
            snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
        if do_normalize:
            snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
        snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
        snake_case_ : Dict = {"""pixel_values""": images}
        return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
| 48
| 1
|
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    """Binary min-heap keyed by vertex distance, used by Prim's algorithm.

    ``node_position[v]`` tracks the heap index of vertex ``v`` so its key can
    be decreased in O(log n).

    Fix: in the mangled original every method was named ``__UpperCamelCase``
    (each definition shadowed the previous one) and the bodies read unbound
    names (``pos``, ``heap``, ``index`` ...).  The methods are restored under
    the names the in-file Prim's function actually invokes: ``get_position``,
    ``set_position``, ``top_to_bottom``, ``bottom_to_top``, ``heapify`` and
    ``delete_minimum``; the class itself is instantiated as ``Heap()`` by
    that caller.
    """

    def __init__(self):
        # node_position[vertex] -> index of that vertex inside the heap array
        self.node_position = []

    def get_position(self, vertex):
        """Return the current heap index of *vertex*."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that *vertex* now lives at heap index *pos*."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at *start* down until the min-heap property holds."""
        if start > size // 2 - 1:
            return  # leaf: nothing below to compare with
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1  # only a left child exists
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            # Swap keys and vertices, then fix the vertex->index map.
            heap[start], heap[smallest_child] = heap[smallest_child], heap[start]
            positions[start], positions[smallest_child] = (
                positions[smallest_child],
                positions[start],
            )
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift the (just decreased) key *val* up from *index* toward the root."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                # Pull the parent down one level.
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without breaking: val is the new minimum.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place over *heap* / *positions*."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with the smallest key."""
        temp = positions[0]
        heap[0] = sys.maxsize  # sentinel: sifts the slot to the bottom
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Return the edges of a minimum spanning tree via Prim's algorithm.

    *adjacency_list* maps each vertex index to a list of ``[neighbor, weight]``
    pairs; the graph is assumed undirected and connected, starting at vertex 0.
    Returns a list of ``(parent, child)`` tree edges.

    Fix: the mangled original bound every intermediate to a throwaway local
    (so ``visited[0]``, ``distance_tv[...]`` and ``nbr_tv[...]`` were never
    updated), passed the adjacency list twice to ``delete_minimum``, and was
    defined under a name the ``__main__`` block never calls.
    """
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum distance of each vertex to the partial tree built so far.
    distance_tv = []  # heap of distances, indexed by heap slot
    positions = []  # vertex stored in each heap slot
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize  # the start vertex never re-enters the fringe
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    # Found a cheaper edge into the tree: decrease the key.
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Reads an undirected weighted edge list from stdin and prints the MST edges.
    # NOTE(review): mangling collapsed ``edges_number``, ``adjacency_list`` and
    # ``edge`` into the single rebound name ``a_``; the loop body still reads
    # the original names, which are unbound here.
    a_ = int(input('''Enter number of edges: ''').strip())
    a_ = defaultdict(list)
    for _ in range(edges_number):
        a_ = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 48
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
# Resource tables for the MBart fast tokenizer below.
# NOTE(review): each table is bound to the same mangled name ``a_`` (every
# assignment shadows the previous one), while the class body reads the
# original names (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, FAIRSEQ_LANGUAGE_CODES).
a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
    '''vocab_file''': {
        '''facebook/mbart-large-en-ro''': (
            '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
        ),
        '''facebook/mbart-large-cc25''': (
            '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
        ),
    },
    '''tokenizer_file''': {
        '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
        '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
    },
}
# Maximum input lengths (positional-embedding sizes) per checkpoint.
a_ = {
    '''facebook/mbart-large-en-ro''': 1024,
    '''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowercase ( _UpperCAmelCase):
    """Fast (tokenizers-backed) MBart tokenizer with source/target language
    handling: sequences are built as ``<tokens> </s> <lang_code>``.

    NOTE(review): identifiers were machine-mangled — every parameter is the
    literal ``lowercase__`` (duplicate parameter names are a SyntaxError),
    every method shares the name ``__UpperCamelCase`` (so ``@property`` /
    ``@src_lang.setter`` and all earlier methods are shadowed), and the
    bodies read unbound names (``mask_token``, ``vocab_file``, ``src_lang``,
    ``token_ids_a``, ``new_src_lang`` ...).  Comments document intent only.
    """

    _A : Dict = VOCAB_FILES_NAMES
    _A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    _A : str = ["""input_ids""", """attention_mask"""]
    _A : Tuple = MBartTokenizer  # matching slow tokenizer class
    _A : List[int] = []  # prefix special tokens (empty for MBart)
    _A : List[int] = []  # suffix special tokens (</s> + language code)

    def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ):
        # Mask token behave like a normal word, i.e. include the space before it
        snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
        super().__init__(
            vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , )
        snake_case_ : Dict = vocab_file
        # Saving the slow vocab only works when a sentencepiece file exists.
        snake_case_ : Optional[int] = False if not self.vocab_file else True
        snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
        # Map each language code to its vocabulary id.
        snake_case_ : Any = {
            lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX"""
        snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang )
        snake_case_ : Tuple = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    @property
    def __UpperCamelCase (self ):
        # src_lang getter.
        return self._src_lang

    @src_lang.setter
    def __UpperCamelCase (self , lowercase__ ):
        # src_lang setter: also refresh the special-token template.
        snake_case_ : Tuple = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
        # build_inputs_with_special_tokens: wrap ids with prefix/suffix tokens.
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
        # create_token_type_ids_from_sequences: MBart uses all-zero type ids.
        snake_case_ : List[Any] = [self.sep_token_id]
        snake_case_ : Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ):
        # _build_translation_inputs: tokenize with src_lang set and attach the
        # forced BOS id for the target language.
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        snake_case_ : int = src_lang
        snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
        snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ )
        snake_case_ : Union[str, Any] = tgt_lang_id
        return inputs

    def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ):
        # prepare_seq2seq_batch: record languages, then defer to the base class.
        snake_case_ : List[str] = src_lang
        snake_case_ : int = tgt_lang
        return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )

    def __UpperCamelCase (self ):
        # Context-switch helper: input mode uses the source language.
        return self.set_src_lang_special_tokens(self.src_lang )

    def __UpperCamelCase (self ):
        # Context-switch helper: target mode uses the target language.
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def __UpperCamelCase (self , lowercase__ ):
        # set_src_lang_special_tokens: suffix = [eos, src_lang_code].
        snake_case_ : int = self.convert_tokens_to_ids(lowercase__ )
        snake_case_ : Tuple = []
        snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
        snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
        snake_case_ : Optional[int] = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def __UpperCamelCase (self , lowercase__ ):
        # set_tgt_lang_special_tokens: suffix = [eos, tgt_lang_code].
        snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ )
        snake_case_ : Optional[int] = []
        snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
        snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
        snake_case_ : List[Any] = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
        # save_vocabulary: copy the sentencepiece model next to the tokenizer.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(lowercase__ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
            return
        snake_case_ : List[str] = os.path.join(
            lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
            copyfile(self.vocab_file , lowercase__ )
        return (out_vocab_file,)
| 48
| 1
|
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """Convert a TensorFlow ALBERT checkpoint into a PyTorch state dict file.

    :param tf_checkpoint_path: path to the TF checkpoint to load.
    :param albert_config_file: JSON config describing the model architecture.
    :param pytorch_dump_path: where to ``torch.save`` the converted weights.

    Fix: the mangled original declared all three parameters with the same
    name (a SyntaxError) and was defined under a name the ``__main__`` block
    below never calls; the signature is restored to match that caller.
    """
    # Build the PyTorch model skeleton from the JSON configuration.
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI wrapper: convert a TensorFlow ALBERT checkpoint to PyTorch.
    # NOTE(review): the parser instance and parsed args are bound to the
    # mangled name ``a_`` while the statements below read ``parser``/``args``
    # — those names are unbound here.
    a_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--albert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained ALBERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    a_ = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 48
|
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash:
    """Pure-Python SHA-1 implementation (FIPS 180-4).

    ``final_hash()`` returns the 40-hex-digit digest of the bytes passed to
    the constructor and matches ``hashlib.sha1``.

    Fix: in the mangled original every method was named ``__UpperCamelCase``
    (each definition shadowed the previous one), bodies referenced unbound
    names (``w``, ``a`` ... ``k``), and the class was named ``__lowercase``
    although the in-file test and ``main`` call ``SHAaHash``.
    """

    def __init__(self, data):
        """Store the message bytes and the standard SHA-1 initial state."""
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit value *n* by *b* bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Return the message padded to a multiple of 64 bytes (0x80, zeros, length)."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand one 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the 80-round compression over every block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                # Round function f and constant k per FIPS 180-4.
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def SCREAMING_SNAKE_CASE__():
    """Self-test: our SHA-1 digest must agree with the standard library.

    Fix: the original compared against ``hashlib.shaa``, an attribute that
    does not exist (AttributeError at call time); the intended function is
    ``hashlib.sha1``.
    """
    msg = b"Test String"
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main():
    """CLI entry point: hash a ``--string`` argument or the contents of ``--file``.

    Fix: the mangled original never kept the parsed arguments (throwaway
    locals) and passed a *function object* to ``bytes(...)``; it was also
    defined under a name the ``__main__`` block below never calls.
    """
    parser = argparse.ArgumentParser(description="""Process some strings or files""")
    parser.add_argument(
        """--string""", dest="""input_string""", default="""Hello World!! Welcome to Cryptography""", help="""Hash the string""", )
    parser.add_argument("""--file""", dest="""input_file""", help="""Hash contents of a file""")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, """rb""") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, """utf-8""")
    print(SHAaHash(hash_input).final_hash())
if __name__ == "__main__":
    # Run the CLI, then execute any doctests in this module as a smoke test.
    # NOTE(review): ``main`` is not defined under that name in this mangled
    # file (the CLI function is named ``SCREAMING_SNAKE_CASE__``).
    main()
    import doctest

    doctest.testmod()
| 48
| 1
|
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Query the GitHub Actions API and report self-hosted runners that are offline.

    :param target_runners: iterable of runner names to check.
    :param token: GitHub token with ``actions:read`` permission.
    :raises ValueError: if any target runner is offline (names in the message).

    Writes the offline-runner list to ``offline_runners.txt`` so it can be
    reported on Slack.  Fix: the mangled original declared both parameters
    with the same name (a SyntaxError), read unbound names (``token``,
    ``cmd``, ``status`` ...), and was defined under a name the ``__main__``
    block below never calls.
    """
    offline_runners = []
    # NOTE: token is interpolated into a shell command; it comes from the CLI
    # of a trusted CI job, not from untrusted input.
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        """ https://api.github.com/repos/huggingface/transformers/actions/runners"""
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("""utf-8""")
    status = json.loads(o)
    runners = status["""runners"""]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("""offline_runners.txt""", """w""") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = """\n""".join([x["""name"""] for x in offline_runners])
        raise ValueError(f'The following runners are offline:\n{failed}')
if __name__ == "__main__":
    # CLI wrapper around the runner-status check.

    def list_str(values):
        """Split a comma-separated CLI value into a list of strings.

        Fix: this helper was defined under a mangled name while the
        ``type=list_str`` reference below used the intended one (NameError),
        and its parameter name did not match the body.
        """
        return values.split(""",""")

    # Fix: the parser and parsed args were bound to the throwaway name ``a_``
    # while the statements below read ``parser``/``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--target_runners''',
        default=None,
        type=list_str,
        required=True,
        help='''Comma-separated list of runners to check status.''',
    )
    parser.add_argument(
        '''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
| 48
|
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
    """Manim scene animating a checkpoint shard being loaded into an empty model.

    NOTE(review): identifiers were machine-mangled — every positional
    argument is the literal name ``lowercase__`` (unbound here; the original
    arguments were manim direction/colour constants such as DOWN, RIGHT,
    YELLOW), locals are collapsed onto ``snake_case_``, and names the body
    reads (``cpu``, ``gpu``, ``model``, ``cpu_targs`` ...) are never bound.
    Comments document the visible structure only.
    """

    def __UpperCamelCase (self ):
        # Base "memory cell" glyphs: an outlined square and a stroke-less fill.
        snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
        snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU block: two rows of six cells plus a label, placed on the left.
        snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
        snake_case_ : str = [mem.copy() for i in range(6 )]
        snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
        snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(lowercase__ )
        # GPU block: four cells plus a label.
        snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
        snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
        snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        gpu.move_to([-1, -1, 0] )
        self.add(lowercase__ )
        # Model block: six cells plus a label on the right.
        snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
        snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : Dict = Text("""Model""" , font_size=24 )
        snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        model.move_to([3, -1.0, 0] )
        self.add(lowercase__ )
        # Small marker rectangles stacked along the CPU cells.
        snake_case_ : Dict = []
        for i, rect in enumerate(lowercase__ ):
            rect.set_stroke(lowercase__ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
            self.add(lowercase__ )
            cpu_targs.append(lowercase__ )
        # Checkpoint block above the model.
        snake_case_ : List[str] = [mem.copy() for i in range(6 )]
        snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
        snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        # Legend (key) in the top-left corner.
        snake_case_ : Optional[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        snake_case_ : Union[str, Any] = MarkupText(
            f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(lowercase__ , lowercase__ )
        snake_case_ : List[Any] = MarkupText(
            f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
        blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        # Caption describing this animation step.
        snake_case_ : List[Any] = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(lowercase__ ) , Write(lowercase__ ) )
        self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
        # Animate each checkpoint cell growing, then moving into a CPU slot.
        snake_case_ : Optional[int] = []
        snake_case_ : List[str] = []
        for i, rect in enumerate(lowercase__ ):
            snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
            target.move_to(lowercase__ )
            first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
            snake_case_ : List[Any] = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
        self.play(*lowercase__ )
        self.play(*lowercase__ )
        self.wait()
| 48
| 1
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    """Return the scalar sum of all parameter values in *state_dict*.

    Tensors whose key contains ``encoder.embeddings`` are skipped (they are
    excluded from the checkpoint-equality check below).

    Fix: the mangled original's parameter name did not match the ``state_dict``
    the body reads (NameError), and it was defined under a name its in-file
    callers (``convert_flava_checkpoint``) never use.
    """
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    """Rename FLAVA original-checkpoint keys to the HF ``FlavaForPreTraining`` layout.

    :param state_dict: the original FLAVA model state dict.
    :param codebook_state_dict: the converted DALL-E codebook state dict,
        re-keyed under the ``image_codebook.`` prefix.
    :returns: a new state dict with renamed keys and float-cast values.

    Fix: the mangled original bound each renamed key to a throwaway local
    instead of threading it through the ``replace`` chain, never populated
    the result dict, and declared duplicate parameter names (a SyntaxError).
    """
    upgrade = {}
    for key, value in state_dict.items():
        # Embedding tables are copied elsewhere; skip them here.
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("""heads.cmd.mim_head.cls.predictions""", """mmm_image_head""")
        key = key.replace("""heads.cmd.mlm_head.cls.predictions""", """mmm_text_head""")
        key = key.replace("""heads.cmd.itm_head.cls""", """itm_head""")
        key = key.replace("""heads.cmd.itm_head.pooler""", """itm_head.pooler""")
        key = key.replace("""heads.cmd.clip_head.logit_scale""", """flava.logit_scale""")
        key = key.replace("""heads.fairseq_mlm.cls.predictions""", """mlm_head""")
        key = key.replace("""heads.imagenet.mim_head.cls.predictions""", """mim_head""")
        key = key.replace("""mm_text_projection""", """flava.text_to_mm_projection""")
        key = key.replace("""mm_image_projection""", """flava.image_to_mm_projection""")
        key = key.replace("""image_encoder.module""", """flava.image_model""")
        key = key.replace("""text_encoder.module""", """flava.text_model""")
        key = key.replace("""mm_encoder.module.encoder.cls_token""", """flava.multimodal_model.cls_token""")
        key = key.replace("""mm_encoder.module""", """flava.multimodal_model""")
        key = key.replace("""text_projection""", """flava.text_projection""")
        key = key.replace("""image_projection""", """flava.image_projection""")
        upgrade[key] = value.float()
    # The codebook weights live under their own prefix in the HF model.
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """Convert an original FLAVA checkpoint (+ DALL-E codebook) to a HF model.

    :param checkpoint_path: local path or URL of the original FLAVA weights.
    :param codebook_path: path to the DALL-E codebook checkpoint.
    :param pytorch_dump_folder_path: output directory for ``save_pretrained``.
    :param config_path: optional HF config JSON; defaults to ``FlavaConfig()``.

    Fix: the mangled original declared duplicate parameter names (a
    SyntaxError) and read unbound names throughout; the signature is restored
    to match the ``__main__`` invocation below.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config).eval()
    # Convert the codebook in memory only (no intermediate checkpoint file).
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)
    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="""cpu""")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="""cpu""")
    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    # Sanity check: total parameter mass must survive the key renaming.
    hf_count = count_parameters(hf_model.state_dict())
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1E-3)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI wrapper: convert a FLAVA checkpoint (+ codebook) to a HF model dir.
    # NOTE(review): the parser instance and parsed args are bound to the
    # mangled name ``a_`` while the lines below read ``parser``/``args``.
    a_ = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
    parser.add_argument('''--codebook_path''', default=None, type=str, help='''Path to flava codebook checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    a_ = parser.parse_args()
    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 48
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def SCREAMING_SNAKE_CASE__ ( a : list , start : int , end : int ):
    """In-place quicksort of ``a[start:end + 1]``; returns the comparison count.

    NOTE(review): the original signature reused one mangled parameter name three
    times (a SyntaxError); the helper names `_in_place_partition` /
    `_in_place_quick_sort` are kept as written — they are expected to be defined
    elsewhere in this module (verify, as sibling definitions were also mangled).
    """
    count = 0
    if start < end:
        # Pre-swap a randomly chosen pivot to the end of the slice.
        pivot = randint(a=start , b=end ) if False else randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        # Partition returns the pivot's final index and the comparisons made.
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def SCREAMING_SNAKE_CASE__ ( a : list , start : int , end : int ):
    """Lomuto-style in-place partition of ``a[start:end + 1]`` around a random pivot.

    Returns:
        (final pivot index, number of comparisons performed).

    NOTE(review): the original signature reused one mangled parameter name three
    times (a SyntaxError); names restored from the body's index arithmetic.
    """
    count = 0
    # Swap a randomly chosen pivot to the end of the slice.
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    # Place the pivot into its final position.
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
# Demo: draw 100 samples from a standard normal distribution, round-trip them
# through a temporary .npy file, sort in place and report the comparison count.
# NOTE(review): all bindings below use the mangled name `a_`, while the code
# then reads `p`/`mu`/`sigma`/`outfile`/`X`/`M`/`r`/`z` and
# `_in_place_quick_sort` — these references look mangled; verify.
a_ = TemporaryFile()
a_ = 100 # 1000 elements are to be sorted
a_ , a_ = 0, 1 # mean and standard deviation
a_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a_ = np.load(outfile)
a_ = len(M) - 1
a_ = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution'''
    '''is :'''
)
print(z)
| 48
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch ``RemBertModel``.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        rembert_config_file: JSON config describing the pretrained architecture.
        pytorch_dump_path: Destination file for ``torch.save``.

    NOTE(review): the original signature reused one mangled parameter name three
    times (a SyntaxError); names restored from the ``__main__`` call order.
    """
    config = RemBertConfig.from_json_file(rembert_config_file )
    print("""Building PyTorch model from configuration: {}""".format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print("""Save PyTorch model to {}""".format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    # CLI entry point for the RemBERT TF -> PyTorch conversion.
    # NOTE(review): the parser and parsed args are bound to `a_`, but the code
    # reads `parser`/`args`/`convert_rembert_tf_checkpoint_to_pytorch` — the
    # names look mangled; verify before running.
    a_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--rembert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained RemBERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    a_ = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 48
|
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( nodes_number : int , probability : float , directed : bool = False ):
    """Generate a random graph as an adjacency-list dict ``{node: [neighbours]}``.

    Args:
        nodes_number: number of vertices.
        probability: chance of creating an edge between any pair of vertices.
        directed: when False, every edge is mirrored (undirected graph).

    NOTE(review): the original signature reused one mangled parameter name three
    times (a SyntaxError); names restored from the body. `complete_graph` is
    expected to be defined elsewhere in this module.
    """
    graph: dict = {i: [] for i in range(nodes_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(nodes_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is greater than probability probability
    for i in range(nodes_number ):
        for j in range(i + 1 , nodes_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, add an edge in from j to i, either
                    graph[j].append(i )
    return graph
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
    """Return the adjacency-list dict of a complete graph on the given number
    of vertices: every vertex is connected to every other vertex."""
    vertex_count = SCREAMING_SNAKE_CASE__
    adjacency: dict = {}
    for node in range(vertex_count ):
        adjacency[node] = [neighbour for neighbour in range(vertex_count ) if neighbour != node]
    return adjacency
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 48
| 1
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
a_ = logging.get_logger(__name__)
# Documentation constants for the TF RegNet model.
# NOTE(review): every value below is bound to the mangled name `a_` while the
# model code reads `_CONFIG_FOR_DOC` / `_CHECKPOINT_FOR_DOC` /
# `_EXPECTED_OUTPUT_SHAPE` / `_IMAGE_CLASS_CHECKPOINT` /
# `_IMAGE_CLASS_EXPECTED_OUTPUT` — these bindings look mangled; verify.
# General docstring
a_ = '''RegNetConfig'''
# Base docstring
a_ = '''facebook/regnet-y-040'''
a_ = [1, 1088, 7, 7]
# Image classification docstring
a_ = '''facebook/regnet-y-040'''
a_ = '''tabby, tabby cat'''
a_ = [
    '''facebook/regnet-y-040''',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class __lowercase ( tf.keras.layers.Layer):
    """Convolution + batch-norm + activation building block used throughout RegNet.

    NOTE(review): the original ``__init__`` reused one mangled parameter name
    (a SyntaxError) and dropped its results into local variables; the
    attribute names are restored from the reads in the call method below, and
    the nonexistent ``tf.keras.layers.*aD`` attributes are fixed to the real
    ``*2D`` layer names.
    """
    def __init__(self , out_channels , kernel_size = 3 , stride = 1 , groups = 1 , activation = "relu" , **kwargs , ):
        super().__init__(**kwargs )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=kernel_size , strides=stride , padding="""VALID""" , groups=groups , use_bias=False , name="""convolution""" , )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" )
        # Fall back to identity when no activation is requested.
        self.activation = ACTaFN[activation] if activation is not None else tf.identity
    def __UpperCamelCase (self , hidden_state ):
        # pad -> conv -> batch norm -> activation
        hidden_state = self.convolution(self.padding(hidden_state ) )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class __lowercase ( tf.keras.layers.Layer):
    """RegNet input embedder: a single strided conv stem applied to pixel values.

    NOTE(review): ``__init__`` parameter names and ``self.*`` attribute
    assignments restored from the reads in the call method (original was a
    SyntaxError due to a duplicated mangled parameter name).
    """
    def __init__(self , config , **kwargs ):
        super().__init__(**kwargs )
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
    def __UpperCamelCase (self , pixel_values ):
        # Validate the channel dimension eagerly (graph mode skips the check).
        num_channels = shape_list(pixel_values )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values , perm=(0, 2, 3, 1) )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class __lowercase ( tf.keras.layers.Layer):
    """RegNet shortcut branch: 1x1 strided conv + batch norm (no activation),
    used to project the residual input to the expected shape.

    NOTE(review): signatures and attribute names restored from the reads in the
    call method (originals were SyntaxErrors due to duplicated mangled names).
    """
    def __init__(self , out_channels , stride = 2 , **kwargs ):
        super().__init__(**kwargs )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=1 , strides=stride , use_bias=False , name="""convolution""" )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" )
    def __UpperCamelCase (self , inputs , training = False ):
        return self.normalization(self.convolution(inputs ) , training=training )
class __lowercase ( tf.keras.layers.Layer):
    """Squeeze-and-excitation block: global average pool, two 1x1 convs
    (relu then sigmoid), then channel-wise rescaling of the input.

    NOTE(review): parameter and attribute names restored from the reads in the
    call method (original ``__init__`` was a SyntaxError).
    """
    def __init__(self , in_channels , reduced_channels , **kwargs ):
        super().__init__(**kwargs )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name="""pooler""" )
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
            tf.keras.layers.Conv2D(filters=in_channels , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
        ]
    def __UpperCamelCase (self , hidden_state ):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state )
        for layer_module in self.attention:
            pooled = layer_module(pooled )
        hidden_state = hidden_state * pooled
        return hidden_state
class __lowercase ( tf.keras.layers.Layer):
    """RegNet "X" residual layer: 1x1 conv, grouped 3x3 conv, 1x1 conv, plus an
    (optionally projecting) shortcut and a final activation.

    NOTE(review): signature and attribute names restored from the reads in the
    call method (original ``__init__`` was a SyntaxError).
    """
    def __init__(self , config , in_channels , out_channels , stride = 1 , **kwargs ):
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name="""shortcut""" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name="""layer.1""" ),
            # No activation on the last conv: applied after the residual add.
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name="""layer.2""" ),
        ]
        self.activation = ACTaFN[config.hidden_act]
    def __UpperCamelCase (self , hidden_state ):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class __lowercase ( tf.keras.layers.Layer):
    """RegNet "Y" residual layer: like the X layer but with a
    squeeze-and-excitation block inserted before the final 1x1 conv.

    NOTE(review): signature and attribute names restored from the reads in the
    call method (original ``__init__`` was a SyntaxError).
    """
    def __init__(self , config , in_channels , out_channels , stride = 1 , **kwargs ):
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name="""shortcut""" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
        )
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name="""layer.1""" ),
            TFRegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
            # No activation on the last conv: applied after the residual add.
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name="""layer.3""" ),
        ]
        self.activation = ACTaFN[config.hidden_act]
    def __UpperCamelCase (self , hidden_state ):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class __lowercase ( tf.keras.layers.Layer):
    """One RegNet stage: ``depth`` stacked residual layers; the first layer
    downsamples with ``stride``.

    NOTE(review): signature and attribute names restored from the reads in the
    call method (original ``__init__`` was a SyntaxError).
    """
    def __init__(self , config , in_channels , out_channels , stride = 2 , depth = 2 , **kwargs ):
        super().__init__(**kwargs )
        layer = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config , in_channels , out_channels , stride=stride , name="""layers.0""" ),
            *[layer(config , out_channels , out_channels , name=f'layers.{i+1}' ) for i in range(depth - 1 )],
        ]
    def __UpperCamelCase (self , hidden_state ):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        return hidden_state
class __lowercase ( tf.keras.layers.Layer):
    """Stack of RegNet stages; optionally collects all intermediate hidden states.

    NOTE(review): signature and attribute names restored from the reads in the
    call method (original ``__init__`` was a SyntaxError).
    """
    def __init__(self , config , **kwargs ):
        super().__init__(**kwargs )
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(config , in_channels , out_channels , depth=depth , name=f'stages.{i+1}' ) )
    def __UpperCamelCase (self , hidden_state , output_hidden_states = False , return_dict = True ):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                # Record the input of each stage.
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            # Record the final output as well.
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
@keras_serializable
class __lowercase ( tf.keras.layers.Layer):
    """Core RegNet model (embedder -> encoder -> pooler) as a Keras layer.

    NOTE(review): signature and attribute names restored from the reads in the
    call method (original ``__init__`` was a SyntaxError).
    """
    # Config class consumed by `keras_serializable` (mangled attribute name kept).
    _A : Dict = RegNetConfig
    def __init__(self , config , **kwargs ):
        super().__init__(**kwargs )
        self.config = config
        self.embedder = TFRegNetEmbeddings(config , name="""embedder""" )
        self.encoder = TFRegNetEncoder(config , name="""encoder""" )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name="""pooler""" )
    @unpack_inputs
    def __UpperCamelCase (self , pixel_values , output_hidden_states = None , return_dict = None , training = False , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values , training=training )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state , perm=(0, 3, 1, 2) )
        pooled_output = tf.transpose(pooled_output , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __lowercase ( _UpperCAmelCase):
    """Base pretrained-model class for RegNet: config class, base prefix and
    dummy input signature."""
    # Mangled attribute names kept; the three values mirror the usual
    # config_class / base_model_prefix / main_input_name triple.
    _A : Any = RegNetConfig
    _A : List[Any] = """regnet"""
    _A : Tuple = """pixel_values"""
    @property
    def __UpperCamelCase (self ):
        # Serving signature: NCHW float32 images of size 224x224.
        # NOTE(review): the original referenced the nonexistent `tf.floataa`;
        # `tf.float32` is the real dtype attribute.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.float32 )}
a_ = r'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
a_ = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    """The bare RegNet model outputting raw features without any specific head on top.""" , _UpperCAmelCase , )
class __lowercase ( _UpperCAmelCase):
    """Bare RegNet model wrapping ``TFRegNetMainLayer`` (no task head).

    NOTE(review): the original ``__init__`` reused one mangled parameter name
    (a SyntaxError); names restored. The decorator arguments below still
    reference mangled/undefined names (`lowercase__`, `_CHECKPOINT_FOR_DOC`,
    ...) exactly as in the original — verify against the doc constants above.
    """
    def __init__(self , config , *inputs , **kwargs ):
        super().__init__(config , *inputs , **kwargs )
        self.regnet = TFRegNetMainLayer(config , name="""regnet""" )
    @unpack_inputs
    @add_start_docstrings_to_model_forward(lowercase__ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def __UpperCamelCase (self , pixel_values , output_hidden_states = None , return_dict = None , training=False , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ , _UpperCAmelCase , )
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase):
    """RegNet with a linear image-classification head on the pooled features.

    NOTE(review): both original method signatures reused one mangled parameter
    name (SyntaxErrors); names restored from the body. The decorator arguments
    keep the original (possibly mangled) references — verify.
    """
    def __init__(self , config , *inputs , **kwargs ):
        super().__init__(config , *inputs , **kwargs )
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config , name="""regnet""" )
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(lowercase__ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def __UpperCamelCase (self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , training=False , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        # flatten -> dense (or identity when num_labels == 0)
        flattened_output = self.classifier[0](pooled_output )
        logits = self.classifier[1](flattened_output )
        loss = None if labels is None else self.hf_compute_loss(labels=labels , logits=logits )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
| 48
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
# Map of canonical DPR checkpoint names to the URLs of their config files.
# NOTE(review): both values above/below are bound to the mangled name `a_`
# (the second rebinding shadows the logger); verify the intended names.
a_ = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-ctx_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
    ),
}
class __lowercase ( _UpperCAmelCase):
    """Configuration for DPR encoders/readers: BERT-style hyperparameters plus
    an optional projection dimension.

    NOTE(review): the original ``__init__`` reused one mangled parameter name
    fifteen times (a SyntaxError); names restored from the attribute
    assignments in the body and the default values.
    """
    _A : Optional[int] = """dpr"""
    def __init__(self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim = 0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # projection_dim == 0 means no projection on top of the encoder output.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 48
| 1
|
"""simple docstring"""
# Universal gas constant, Unit - J mol-1 K-1.
UNIVERSAL_GAS_CONSTANT = 8.314462
# NOTE(review): the constant was originally bound to the mangled name `a_`
# while both functions read UNIVERSAL_GAS_CONSTANT; the alias is kept.
a_ = UNIVERSAL_GAS_CONSTANT
def SCREAMING_SNAKE_CASE__ ( moles : float , kelvin : float , volume : float ):
    """Ideal-gas pressure P = nRT / V.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""" )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def SCREAMING_SNAKE_CASE__ ( moles : float , kelvin : float , pressure : float ):
    """Ideal-gas volume V = nRT / P.

    Raises:
        ValueError: if any input is negative.

    NOTE(review): this definition shadows the one above (both carry the same
    mangled name), as in the original file.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""" )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 48
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Regex matching any character that is not a letter, digit or underscore
# (used as the token splitter).
a_ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
# NOTE(review): the code below reads NON_ALPHA / MIN_NUM_TOKENS / NUM_PERM,
# while these values are bound to `a_` — the bindings look mangled; verify.
a_ = 10
a_ = 256
def SCREAMING_SNAKE_CASE__ ( tokens : List[str] ):
    """Compute the MinHash signature of a token list, or ``None`` when the
    document has fewer than ``MIN_NUM_TOKENS`` tokens."""
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    # NOTE(review): the original passed the token list itself as `num_perm`;
    # MinHash expects the permutation count (the module-level NUM_PERM).
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
    """Split the input on non-alphanumeric characters and return the set of
    non-empty tokens."""
    pieces = NON_ALPHA.split(SCREAMING_SNAKE_CASE__ )
    return {piece for piece in pieces if len(piece.strip() ) > 0}
class __lowercase :
    """Incremental MinHash-LSH index that groups near-duplicate documents into
    clusters keyed by the first member seen.

    NOTE(review): two method signatures originally reused one mangled parameter
    name (SyntaxErrors); names restored from the bodies. All four methods share
    the mangled name `__UpperCamelCase`, so later definitions shadow earlier
    ones — kept as in the original; verify the intended method names.
    """
    def __init__(self , *,
        duplication_jaccard_threshold : float = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        # base key -> set of near-duplicate keys.
        # NOTE(review): the original factory was the undefined `lowercase__`;
        # members are added with `.add(...)` below, so the factory must be `set`.
        self._duplicate_clusters = defaultdict(set )
    def __UpperCamelCase (self , code_key , min_hash ):
        """Insert (code_key, min_hash) and attach it to an existing cluster when
        a near-duplicate is already indexed."""
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                # No known base yet: start a cluster under the first neighbour.
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def __UpperCamelCase (self ):
        """Return clusters as lists of {base_index, repo_name, path} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def __UpperCamelCase (self , filepath ):
        """Dump the duplicate clusters to `filepath` as JSON."""
        # NOTE(review): pre-existing — `get_duplicate_clusters` does not match
        # the mangled method names above; verify this call target.
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , """w""" ) as f:
            json.dump(duplicate_clusters , f )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
    """Compute the MinHash of one ``(index, data)`` element.

    Returns ``((index, repo_name, path), min_hash)``, or ``None`` implicitly
    when the document is too short to hash.
    """
    index , data = SCREAMING_SNAKE_CASE__
    tokens = [token for token in NON_ALPHA.split(data["""content"""] ) if len(token.strip() ) > 0]
    min_hash = get_min_hash(tokens )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ):
    """Yield MinHash results for every dataset element using a process pool,
    skipping elements that were too short to hash."""
    with mp.Pool() as pool:
        results = pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , )
        for item in results:
            if item is not None:
                yield item
def SCREAMING_SNAKE_CASE__ ( dataset_iterable : Type[Dataset] , jaccard_threshold : float ):
    """Build duplicate clusters for a dataset via MinHash LSH.

    NOTE(review): the original signature reused one mangled parameter name
    twice (a SyntaxError); names restored from the body.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterable ) ) , max_queue_size=1_0_0 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE__ ( codea : str , codeb : str ):
    """Jaccard similarity of the token sets of two code strings.

    NOTE(review): the original signature reused one mangled parameter name
    twice (a SyntaxError); names restored from the body.
    """
    tokensa = get_tokens(codea )
    tokensb = get_tokens(codeb )
    return len(tokensa & tokensb ) / len(tokensa | tokensb )
a_ = None
def SCREAMING_SNAKE_CASE__ ( cluster , jaccard_threshold ):
    """Reduce one duplicate cluster to its "extremes": a minimal subset such
    that every member is within `jaccard_threshold` of some extreme. Each
    extreme's `copies` counts the members it represents.

    Reads the module-level `_shared_dataset` set by the pool driver.

    NOTE(review): the original signature reused one mangled parameter name
    twice (a SyntaxError); names restored from the body.
    """
    extremes = []
    for element in cluster:
        code = _shared_dataset[element["""base_index"""]]["""content"""]
        for extreme in extremes:
            code_extreme = _shared_dataset[extreme["""base_index"""]]["""content"""]
            if jaccard_similarity(code , code_extreme ) >= jaccard_threshold:
                # Close enough to an existing extreme: count it as a copy.
                extreme["copies"] += 1
                break
        else:
            # No extreme covers this element: it becomes a new extreme.
            element["copies"] = 1
            extremes.append(element )
    return extremes
def SCREAMING_SNAKE_CASE__ ( cluster_list , dataset , jaccard_threshold ):
    """Run `_find_cluster_extremes_shared` over all clusters with a process pool.

    The dataset is published through the module-level `_shared_dataset` global
    so worker processes can read it without pickling it per task.

    NOTE(review): the original signature reused one mangled parameter name
    three times (a SyntaxError), and the `global` value was assigned to a
    mangled local; both restored from the body.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def SCREAMING_SNAKE_CASE__ ( dataset : Type[Dataset] , jaccard_threshold : float = 0.85 ):
    """Deduplicate a dataset with MinHash LSH.

    Builds duplicate clusters, keeps only each cluster's "extreme" members and
    filters everything else out of the dataset.

    Returns:
        (filtered dataset, duplicate clusters annotated with `is_extreme` and
        `copies`).

    NOTE(review): the original signature (and its inner lambda) reused one
    mangled parameter name (SyntaxErrors); names restored from the body.
    """
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    # Duplicates that are not extremes are dropped from the dataset.
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda example , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["""is_extreme"""] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["""copies"""] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(f'Original dataset size: {len(dataset )}' )
    print(f'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(f'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(f'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(f'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
| 48
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class __lowercase ( _UpperCAmelCase):
    """Output of the Shap-E pipeline: rendered image(s) as PIL or numpy arrays."""
    # Rendered images (mangled field name kept).
    _A : Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for generating the latent representation of a 3D asset from an image and
    rendering it with the Shap-E renderer.

    The original class/method names were mangled (all methods shared one name, so the
    `self.prepare_latents` / `self._encode_image` / `self._execution_device` calls in
    `__call__` were broken); names are restored so the class is self-consistent.
    """

    def __init__(self, prior: PriorTransformer, image_encoder, image_processor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw fresh noise (or validate user-supplied latents) and scale by the scheduler's init sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload the image encoder and prior to CPU via accelerate, loading them on demand."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""")

        device = torch.device(f'cuda:{gpu_id}')

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        """Device the modules actually execute on (accounts for accelerate hooks)."""
        if self.device != torch.device("""meta""") or not hasattr(self.image_encoder, """_hf_hook"""):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, """_hf_hook""")
                and hasattr(module._hf_hook, """execution_device""")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        """Encode the conditioning image(s); prepend zero embeddings for classifier-free guidance."""
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="""pt""").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["""last_hidden_state"""]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(a_)  # `a_` is the mangled module-level example docstring
    def __call__(self, image, num_images_per_prompt=1, num_inference_steps=25, generator=None, latents=None, guidance_scale=4.0, frame_size=64, output_type="pil", return_dict=True):
        """
        Run the diffusion prior conditioned on `image` and render the resulting latents.

        Returns a `ShapEPipelineOutput` (or a plain tuple when `return_dict=False`).
        Raises `ValueError` for unsupported `image` or `output_type` values.
        """
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}')

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim

            # BUGFIX: was `if do_classifier_free_guidance is not None:` which is always
            # true for a bool; guidance must only be applied when it is enabled.
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            # (stray debug `print()` removed here)
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=40_96,
                n_coarse_samples=64,
                n_fine_samples=1_28,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}')

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, """final_offload_hook""") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 48
|
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    # Count token occurrences in a binarized dataset to smooth MLM masking probabilities.
    parser = argparse.ArgumentParser(
        description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
    )
    parser.add_argument(
        '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
    )
    parser.add_argument(
        '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
    )
    parser.add_argument('''--vocab_size''', default=30522, type=int)
    args = parser.parse_args()

    logger.info(F'''Loading data from {args.data_file}''')
    with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)

    logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    # Dense per-token count vector indexed by token id.
    # BUGFIX: the mangled version assigned `a_ = v` (discarding indices) and then
    # dumped an undefined name `counts`; restore `counts[k] = v`.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(F'''Dump to {args.token_counts_dump}''')
    with open(args.token_counts_dump, '''wb''') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 48
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (original name mangled to `a_`; presumably `logger` — TODO confirm).
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
    """
    Configuration for a backbone provided by the `timm` library.

    BUGFIX: the mangled `__init__` repeated the parameter name `lowercase__` five
    times (a SyntaxError) and assigned every value to a throwaway local instead of
    an instance attribute; parameter and attribute names are restored.
    """

    # Model-type identifier (value kept from the original `_A` attribute).
    model_type = """timm_backbone"""

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        """
        Args:
            backbone: Name of the timm backbone to load (None defers the choice).
            num_channels: Number of input channels.
            features_only: Whether the backbone returns only feature maps.
            use_pretrained_backbone: Whether to load pretrained timm weights.
            out_indices: Which feature-map indices to expose; defaults to the last one.
        """
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        # This config always describes a timm-backed backbone.
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 48
|
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    """Assert the loaded text dataset has 4 rows, one `text` column, and the expected dtypes.

    Name restored from the mangled `SCREAMING_SNAKE_CASE__`: every test below calls
    `_check_text_dataset`, which was otherwise undefined (and the duplicate parameter
    names were a SyntaxError).
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """Reading a text file into a Dataset honors `keep_in_memory` (fixtures: text_path, tmp_path)."""
    cache_dir = tmp_path / """cache"""
    expected_features = {"""text""": """string"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    """features""", [
        None,
        {"""text""": """string"""},
        {"""text""": """int32"""},
        {"""text""": """float32"""},
    ], )
def test_dataset_from_text_features(features, text_path, tmp_path):
    """An explicit `features` mapping overrides the default string dtype of the `text` column."""
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""text""": """string"""}
    expected_features = features.copy() if features else default_expected_features
    # BUGFIX: the mangled body passed the whole parametrized value to `Value(...)`;
    # each column's dtype string is what must be wrapped.
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("""split""", [None, NamedSplit("""train"""), """train""", """test"""])
def test_dataset_from_text_split(split, text_path, tmp_path):
    """`split` passed to the reader is reflected on the loaded dataset (defaults to "train")."""
    cache_dir = tmp_path / """cache"""
    expected_features = {"""text""": """string"""}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    # BUGFIX: without parentheses this parsed as `(dataset.split == split) if split else "train"`,
    # making the assertion vacuously true whenever split is falsy.
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("""path_type""", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    """The reader accepts both a single path string and a list of paths."""
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / """cache"""
    expected_features = {"""text""": """string"""}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for split in splits:
snake_case_ : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """Reading a {"train": path} mapping yields a DatasetDict, honoring `keep_in_memory`."""
    cache_dir = tmp_path / """cache"""
    expected_features = {"""text""": """string"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"""train""": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    """features""", [
        None,
        {"""text""": """string"""},
        {"""text""": """int32"""},
        {"""text""": """float32"""},
    ], )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    """An explicit `features` mapping is applied to the loaded DatasetDict."""
    cache_dir = tmp_path / """cache"""
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"""text""": """string"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"""train""": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("""split""", [None, NamedSplit("""train"""), """train""", """test"""])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    """A single named split loads just that split; `split=None` loads both train and test."""
    if split:
        path = {split: text_path}
    else:
        split = """train"""
        path = {"""train""": text_path, """test""": text_path}
    cache_dir = tmp_path / """cache"""
    expected_features = {"""text""": """string"""}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 48
| 1
|
"""simple docstring"""
def count_divisors(n):
    """Return the number of divisors of `n` via its prime factorization.

    Name and parameter restored: the body used an undefined `n` and the caller
    below referenced an undefined `count_divisors`.
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        # (e+1) choices for the exponent of this prime in a divisor.
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # One prime factor > sqrt(original n) remains with exponent 1.
        n_divisors *= 2
    return n_divisors


def solution():
    """Project Euler 12: first triangular number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 5_0_0:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
| 48
|
"""simple docstring"""
from copy import deepcopy
class BinaryIndexedTree:
    """Fenwick (binary indexed) tree over a list of numbers.

    `self.tree[0]` stores element 0 directly; indices >= 1 use the classic
    Fenwick layout.  Method names are restored from the mangled source, which
    defined every method as `__UpperCamelCase` (each shadowing the previous)
    while the bodies called `self.init`, `self.next_`, `self.prev`, etc.
    """

    def __init__(self, arr=None, size=None):
        """Build from an existing array, or create a zeroed tree of `size` elements."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("""Either arr or size must be specified""")

    def init(self, arr):
        """Initialize the tree from `arr` in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """Reconstruct and return the original array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        # Next index covering `index` (add lowest set bit).
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        # Parent index in a prefix query (clear lowest set bit).
        return index - (index & (-index))

    def add(self, index, value):
        """Add `value` to element `index` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        """Set element `index` to `value`."""
        self.add(index, value - self.get(index))

    def prefix(self, right):
        """Return sum of elements [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        """Return sum of elements [left, right)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        """Return the value of element `index`."""
        return self.query(index, index + 1)

    def rank_query(self, value):
        """Largest index i such that prefix(i+1) <= value; -1 if value < element 0."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 48
| 1
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Make torch ops deterministic so the expected output slices below are reproducible.
enable_full_determinism()
class __lowercase ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase):
    """
    Tests for `VQModel`.

    Restored from the mangled source: every method/property was named
    `__UpperCamelCase` (so only the last definition survived and nothing ran),
    mixin bases were mangled to `_UpperCAmelCase`, and `self.dummy_input` /
    `model_class` were referenced but never defined under those names.
    """

    model_class = VQModel
    main_input_name = """sample"""

    @property
    def dummy_input(self, sizes=(32, 32)):
        # A batch of random sample images on the test device.
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            """block_out_channels""": [32, 64],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass  # not applicable to VQModel

    def test_training(self):
        pass  # not applicable to VQModel

    def test_from_pretrained_hub(self):
        # `output_loading_info` must be True (was the mangled `lowercase__`).
        model, loading_info = VQModel.from_pretrained("""fusing/vqgan-dummy""", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["""missing_keys"""]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("""fusing/vqgan-dummy""")
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 48
|
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    """Sort `collection` in place using insertion sort with binary search and return it.

    BUGFIX: the mangled body passed the list itself to `range(1, ...)` (a TypeError)
    and the `__main__` block called an undefined `binary_insertion_sort`; names restored.
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        # Binary-search the insertion point within the sorted prefix [0, i).
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift the tail right and insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(binary_insertion_sort(unsorted))
| 48
| 1
|
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Names restored from the mangled `a_` (which was re-assigned four times): the
# tokenizer class below references `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
    },
    '''emoji_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a GPT-NeoX-Japanese vocab file and emoji JSON.

    Each vocab line is either a single token or a comma-separated list of
    surface variants that map to the same id.  Returns
    (vocab, raw_vocab, ids_to_tokens, emoji).  Name restored from the mangled
    source — the tokenizer's `__init__` calls `load_vocab_and_emoji`.
    """
    with open(emoji_file, """r""", encoding="""utf-8""") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, """r""", encoding="""utf-8""") as f:
        token = f.readlines()
    # A line that is exactly "," (or contains no comma) is a single token.
    token = [[t.rstrip("""\n""")] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""").split(""",""") for t in token]
    for idx, b in enumerate(ids_to_tokens_iter := token):
        ids_to_tokens[idx] = b
        # NOTE(review): key shape for raw_vocab reconstructed from upstream — TODO confirm.
        raw_vocab[""",""".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """
    Tokenizer for GPT-NeoX-Japanese, delegating subword splitting to
    `SubWordJapaneseTokenizer`.

    Method names restored from the mangled source (all were `__UpperCamelCase`,
    shadowing each other) to the standard `PreTrainedTokenizer` hook names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs)
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                """ model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                """ pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Map a token to its id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = """""".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation):
        """Concatenate all conversation turns (each followed by EOS), truncating to model_max_length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.txt (one comma-joined variant list per id) and emoji.json."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""])
        else:
            vocab_file = (
                (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
            )
            emoji_file = (
                (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
            )
        with open(vocab_file, """w""", encoding="""utf-8""") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        """ Please check that the vocabulary is not corrupted!""")
                    index = token_index
                writer.write(""",""".join(token) + """\n""")
                index += 1
        with open(emoji_file, """w""", encoding="""utf-8""") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """
    Greedy longest-match-with-smallest-id subword tokenizer for Japanese text.

    Class name restored from the mangled `__lowercase` — the tokenizer above
    instantiates `SubWordJapaneseTokenizer(...)`.  Attribute/method names were
    likewise collapsed by the mangling and are restored here.
    """

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        # Longest surface form in the vocabulary (limits lookahead in tokenize()).
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(R"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""")
        self.content_repatter2 = re.compile(R"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""")
        self.content_repatter3 = re.compile(R"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""")
        self.content_repatter4 = re.compile(
            R"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""")
        self.content_repatter5 = re.compile(
            R"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""")
        self.content_repatter6 = re.compile(
            R"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""")
        keisen = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
        blocks = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
        self.content_trans1 = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        """Replace URLs, emails, phone numbers, dates and prices with placeholder tokens."""
        content = self.content_repatter1.sub("""<URL>""", content)
        content = self.content_repatter2.sub("""<EMAIL>""", content)
        content = self.content_repatter3.sub("""<TEL>""", content)
        content = self.content_repatter4.sub("""<DATE>""", content)
        content = self.content_repatter5.sub("""<DATE>""", content)
        content = self.content_repatter6.sub("""<PRICE>""", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("""<BLOCK><BLOCK>""", """<BLOCK>""")
        return content

    def tokenize(self, text, clean=False):
        """Tokenize `text`, greedily matching vocab entries; unknown chars fall back to byte tokens."""
        text = text.replace(""" """, """<SP>""")
        # NOTE(review): the second replace presumably targeted the ideographic space
        # U+3000 before mangling flattened it to ASCII — restored as such.
        text = text.replace("\u3000", """<SP>""")
        text = text.replace("""\r\n""", """<BR>""")
        text = text.replace("""\n""", """<BR>""")
        text = text.replace("""\r""", """<BR>""")
        text = text.replace("""\t""", """<TAB>""")
        text = text.replace("""—""", """ー""")
        text = text.replace("""−""", """ー""")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            # True for single chars whose UTF-8 encoding (2 bytes) falls in symbol ranges.
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0Xc_2_a_1 and c <= 0Xc_2_b_f)
                    or (c >= 0Xc_7_8_0 and c <= 0Xc_7_8_3)
                    or (c >= 0Xc_a_b_9 and c <= 0Xc_b_b_f)
                    or (c >= 0Xc_c_8_0 and c <= 0Xc_d_a_2)
                ):
                    return True
            return False

        def checkuae(x):
            # True for single chars in the U+2000–U+2BFF block (3-byte UTF-8).
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0Xe_2_8_0_8_0 and c <= 0Xe_2_b_0_7_f:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            # Special tokens start with '<'; allow lookahead up to the longest vocab entry.
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == """<""" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("""<KIGOU>""")
                elif checkuae(wd):
                    result.append("""<U2000U2BFF>""")
                else:
                    for i in wd.encode("""utf-8"""):
                        result.append("""<|byte%d|>""" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        """Convert a single id back to its surface string (decoding byte tokens if needed)."""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("""utf-8""", errors="""replace"""))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["""emoji_inv"""][word])
            elif word == "<SP>":
                words.append(""" """)
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("""\t""")
            elif word == "<BLOCK>":
                words.append("""▀""")
            elif word == "<KIGOU>":
                words.append("""ǀ""")
            elif word == "<U2000U2BFF>":
                words.append("""‖""")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("""utf-8""", errors="""replace"""))
        text = """""".join(words)
        return text
| 48
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """
    Wraps a Chinese-CLIP image processor and a BERT tokenizer into a single processor.

    Method names restored from the mangled source (all were `__UpperCamelCase`,
    each shadowing the previous one); attribute names restored to the
    `ProcessorMixin` contract that `super().__init__` relies on.
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """ChineseCLIPImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("""feature_extractor""")

        # Fall back to the deprecated `feature_extractor` kwarg when given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; merging pixel values into the text encoding."""
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # Preserve order while de-duplicating.
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""",
            FutureWarning,
        )
        return self.image_processor_class
| 48
| 1
|
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( vertices_number: int , probability: float , directed: bool = False ):
    """
    Generate a random Erdos-Renyi-style graph as an adjacency-list dict.

    Args:
        vertices_number: number of nodes (labelled 0..n-1).
        probability: chance of adding each possible edge.
        directed: if False (default), every edge is mirrored j -> i.

    Returns:
        dict mapping each node to the list of its neighbours.

    Note: the original signature reused the same name for all three parameters
    (a SyntaxError); distinct names restore the documented behaviour.
    """
    graph: dict = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, add an edge in from j to i, either
                    graph[j].append(i )
    return graph
def complete_graph ( vertices_number: int ):
    """
    Generate a complete (fully connected) graph with `vertices_number` nodes.

    Renamed from the obfuscated `SCREAMING_SNAKE_CASE__`: the random-graph
    generator above calls `complete_graph(...)`, which was otherwise undefined.

    Returns:
        dict mapping each node i to every other node j != i.
    """
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 48
|
"""simple docstring"""
import argparse
import copy
def generate_neighbours ( path ):
    """
    Read a TSP instance file and build an adjacency map with edge costs.

    Each line of the file is expected to look like "<node_a> <node_b> <distance>".
    The edge is recorded in both directions (the graph is undirected).

    Renamed from the obfuscated `SCREAMING_SNAKE_CASE__`: `main` below calls
    `generate_neighbours`, and the local names (`dict_of_neighbours`, `_list`)
    were never actually assigned in the obfuscated original.

    Returns:
        dict mapping each node name to a list of [neighbour, distance] pairs;
        distances are kept as strings, exactly as read from the file.
    """
    dict_of_neighbours = {}

    with open(path ) as f:
        for line in f:
            first_node, second_node, distance = line.split()[:3]
            # Record the edge in both directions.
            dict_of_neighbours.setdefault(first_node , [] ).append([second_node, distance] )
            dict_of_neighbours.setdefault(second_node , [] ).append([first_node, distance] )

    return dict_of_neighbours
def generate_first_solution ( path , dict_of_neighbours ):
    """
    Greedy nearest-neighbour construction of an initial TSP tour.

    The start node is the first character of the instance file. Renamed from the
    obfuscated `SCREAMING_SNAKE_CASE__`; the locals (`first_solution`, `visiting`,
    `best_node`, ...) were never bound in the obfuscated original.

    Returns:
        (first_solution, distance_of_first_solution): the closed tour as a list
        of node names, and its total distance.
    """
    with open(path ) as f:
        start_node = f.read(1 )  # first character of the file names the start node
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        # 10000 is a sentinel "infinity"; on the final (dead-end) iteration it is
        # added to the distance and compensated for after the loop.
        minim = 1_0_0_0_0
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node

    first_solution.append(end_node )

    # Find the cost of the closing edge back to the start node.
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 1_0_0_0_0
    )
    return first_solution, distance_of_first_solution
def find_neighborhood ( solution , dict_of_neighbours ):
    """
    Build the 2-swap neighbourhood of a tour.

    For every pair of distinct interior nodes, swap them, recompute the tour
    distance and append it as the candidate's last element. Renamed from the
    obfuscated `SCREAMING_SNAKE_CASE__` (it is called by `tabu_search`); the sort
    lambda also referenced an undefined `x` in the obfuscated original.

    Returns:
        list of unique candidate tours (each with its total distance appended),
        sorted by total distance ascending.
    """
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n

            # Total distance of the swapped tour.
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )  # cost is stored as the candidate's last element

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search ( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    """
    Tabu-search metaheuristic for the TSP.

    Args:
        first_solution: initial closed tour (list of node names).
        distance_of_first_solution: total distance of that tour.
        dict_of_neighbours: adjacency map from `generate_neighbours`.
        iters: number of iterations to run.
        size: maximum length of the tabu list.

    Renamed from the obfuscated `SCREAMING_SNAKE_CASE__` (it is called by `main`);
    all the locals were unbound in the obfuscated original.

    Returns:
        (best_solution_ever, best_cost)
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1

        found = False
        while not found:
            # First index where the candidate differs from the current solution
            # identifies the attempted node exchange.
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]  # drop the appended cost element
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Move is tabu: fall back to the next best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

            if len(tabu_list ) >= size:
                tabu_list.pop(0 )

        count = count + 1

    return best_solution_ever, best_cost
def main ( args=None ):
    """
    Entry point: build the neighbour map, construct a greedy first tour and run
    tabu search with the CLI-provided iteration count and tabu-list size.

    Renamed from the obfuscated `SCREAMING_SNAKE_CASE__`: the `__main__` guard
    below calls `main(parser.parse_args())`, and `args`/`best_sol`/`best_cost`
    were unbound in the obfuscated original.
    """
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
    # The original assigned the parser to `a_` but then called `parser.add_argument`
    # (NameError); bind it under the name the code actually uses.
    parser = argparse.ArgumentParser(description='''Tabu Search''')
    parser.add_argument(
        '''-f''',
        '''--File''',
        type=str,
        help='''Path to the file containing the data''',
        required=True,
    )
    parser.add_argument(
        '''-i''',
        '''--Iterations''',
        type=int,
        help='''How many iterations the algorithm should perform''',
        required=True,
    )
    parser.add_argument(
        '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
    )
    # Pass the arguments to main method
    main(parser.parse_args())
| 48
| 1
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# The obfuscated original assigned all three constants to `a_`, while the code
# below reads `logger` (e.g. `logger.warning`) and `MODEL_CONFIG_CLASSES`
# (NameErrors); bind the names the module actually uses.
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowercase :
    """
    Arguments pertaining to the data used for masked-image-modeling pre-training.

    NOTE(review): in the obfuscated original every field was named `_A` (so only the
    last one survived), defaults referenced the undefined `_UpperCAmelCase`, and the
    `__post_init__` hook was misnamed. The field names below follow the help texts.
    """

    dataset_name: Optional[str] = field(
        default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""})
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""})
    image_column_name: Optional[str] = field(
        default=None , metadata={"""help""": """The column name of the images in the files. If not set, will try to use 'image' or 'img'."""} , )
    train_dir: Optional[str] = field(default=None , metadata={"""help""": """A folder containing the training data."""})
    validation_dir: Optional[str] = field(default=None , metadata={"""help""": """A folder containing the validation data."""})
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""})
    mask_patch_size: int = field(default=32 , metadata={"""help""": """The size of the square patches to use for masking."""})
    mask_ratio: float = field(
        default=0.6 , metadata={"""help""": """Percentage of patches to mask."""} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )

    def __post_init__(self ):
        # Collect the explicitly provided data folders so `load_dataset` can consume them.
        data_files = {}
        if self.train_dir is not None:
            data_files["""train"""] = self.train_dir
        if self.validation_dir is not None:
            data_files["""validation"""] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class __lowercase :
    """
    Arguments pertaining to which model/config/image processor we pre-train.

    NOTE(review): in the obfuscated original every field was named `_A` and defaults
    referenced the undefined `_UpperCAmelCase`. Field names below follow the help
    texts; `MODEL_TYPES` is the module-level tuple defined near the imports.
    """

    model_name_or_path: str = field(
        default=None , metadata={
            """help""": (
                """The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a """
                """checkpoint identifier on the hub. """
                """Don't set if you want to train a model from scratch."""
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(MODEL_TYPES)} , )
    config_name_or_path: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
    config_overrides: Optional[str] = field(
        default=None , metadata={
            """help""": (
                """Override some existing default config settings when a model is trained from scratch. Example: """
                """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"""} , )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    image_processor_name: str = field(default=None , metadata={"""help""": """Name or path of preprocessor config."""})
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
    image_size: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """The size (resolution) of each image. If not specified, will use `image_size` of the configuration."""
            )
        } , )
    patch_size: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."""
            )
        } , )
    encoder_stride: Optional[int] = field(
        default=None , metadata={"""help""": """Stride to use for the encoder."""} , )
class __lowercase :
    """
    Generates the random boolean mask for SimMIM masked-image-modeling.

    The image is divided into `(input_size / mask_patch_size)**2` mask patches;
    `mask_ratio` of them are masked, and the mask is then upsampled to the model
    patch grid.

    NOTE(review): the obfuscated original never assigned the instance attributes
    (`self.input_size` etc. were read but only `snake_case_` locals were written)
    and used an undefined `lowercase__` as the numpy dtype; both are restored.
    """

    def __init__(self , input_size=1_92 , mask_patch_size=32 , model_patch_size=4 , mask_ratio=0.6 ):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("""Input size must be divisible by mask patch size""" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("""Mask patch size must be divisible by model patch size""" )

        # Side length of the coarse mask grid and upsampling factor to model patches.
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )

    def __call__(self ):
        # Pick `mask_count` random patches to mask.
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count , dtype=int )
        mask[mask_idx] = 1
        # Expand the coarse grid to the model patch resolution.
        mask = mask.reshape((self.rand_size, self.rand_size) )
        mask = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
        return torch.tensor(mask.flatten() )
def SCREAMING_SNAKE_CASE__ ( examples ):
    """
    Collate function: stack per-example pixel values and masks into batch tensors.

    Args:
        examples: list of dicts with "pixel_values" and "mask" tensors.

    Returns:
        dict with batched "pixel_values" and the mask under "bool_masked_pos"
        (the key expected by masked-image-modeling models).
    """
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples] )
    mask = torch.stack([example["""mask"""] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def SCREAMING_SNAKE_CASE__ ( ):
    """Entry point of the SimMIM masked-image-modeling pre-training script.

    Parses model/data/training arguments, loads and splits the dataset, builds the
    config/image processor/model, sets the train/eval transforms (including the
    SimMIM mask generator) and runs the HF `Trainer`.

    NOTE(review): this block is obfuscated — results are assigned to `snake_case_`
    while later lines read the intended names (`parser`, `model_args`, `data_args`,
    `training_args`, `ds`, `config`, `image_processor`, `model`, ...), so it will
    not run as-is; the structure below documents the intended flow.
    """
    snake_case_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        snake_case_ , snake_case_ , snake_case_ : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        snake_case_ , snake_case_ , snake_case_ : List[Any] = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mim""" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    snake_case_ : Optional[int] = training_args.get_process_log_level()
    logger.setLevel(SCREAMING_SNAKE_CASE__ )
    transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
    logger.info(f'Training/evaluation parameters {training_args}' )
    # Detecting last checkpoint.
    snake_case_ : List[Any] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        snake_case_ : Dict = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
    # Initialize our dataset.
    snake_case_ : List[Any] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    snake_case_ : int = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE__ ) and data_args.train_val_split > 0.0:
        snake_case_ : List[Any] = ds["""train"""].train_test_split(data_args.train_val_split )
        snake_case_ : List[str] = split["""train"""]
        snake_case_ : List[str] = split["""test"""]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    snake_case_ : Dict = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        snake_case_ : str = AutoConfig.from_pretrained(model_args.config_name_or_path , **SCREAMING_SNAKE_CASE__ )
    elif model_args.model_name_or_path:
        snake_case_ : str = AutoConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ )
    else:
        snake_case_ : Dict = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
        if model_args.config_overrides is not None:
            logger.info(f'Overriding config: {model_args.config_overrides}' )
            config.update_from_string(model_args.config_overrides )
            logger.info(f'New config: {config}' )
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(SCREAMING_SNAKE_CASE__ , """decoder_type""" ):
        snake_case_ : str = """simmim"""
    # adapt config
    snake_case_ : Tuple = model_args.image_size if model_args.image_size is not None else config.image_size
    snake_case_ : str = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    snake_case_ : List[Any] = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
    config.update(
        {
            """image_size""": model_args.image_size,
            """patch_size""": model_args.patch_size,
            """encoder_stride""": model_args.encoder_stride,
        } )
    # create image processor
    if model_args.image_processor_name:
        snake_case_ : Union[str, Any] = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE__ )
    elif model_args.model_name_or_path:
        snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ )
    else:
        snake_case_ : str = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        snake_case_ : Tuple = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        snake_case_ : List[Any] = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        snake_case_ : Optional[int] = AutoModelForMaskedImageModeling.from_config(SCREAMING_SNAKE_CASE__ )
    if training_args.do_train:
        snake_case_ : str = ds["""train"""].column_names
    else:
        snake_case_ : Union[str, Any] = ds["""validation"""].column_names
    if data_args.image_column_name is not None:
        snake_case_ : Any = data_args.image_column_name
    elif "image" in column_names:
        snake_case_ : str = """image"""
    elif "img" in column_names:
        snake_case_ : Dict = """img"""
    else:
        snake_case_ : Optional[int] = column_names[0]
    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    snake_case_ : List[str] = Compose(
        [
            Lambda(lambda SCREAMING_SNAKE_CASE__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
    # create mask generator
    snake_case_ : Any = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
    def preprocess_images(SCREAMING_SNAKE_CASE__ : List[Any] ):
        # Apply the SimMIM transforms and attach one random mask per example.
        snake_case_ : List[str] = [transforms(SCREAMING_SNAKE_CASE__ ) for image in examples[image_column_name]]
        snake_case_ : Any = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            snake_case_ : Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(SCREAMING_SNAKE_CASE__ )
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            snake_case_ : Union[str, Any] = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(SCREAMING_SNAKE_CASE__ )
    # Initialize our trainer
    snake_case_ : Tuple = Trainer(
        model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , )
    # Training
    if training_args.do_train:
        snake_case_ : Any = None
        if training_args.resume_from_checkpoint is not None:
            snake_case_ : str = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            snake_case_ : int = last_checkpoint
        snake_case_ : List[Any] = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        snake_case_ : List[Any] = trainer.evaluate()
        trainer.log_metrics("""eval""" , SCREAMING_SNAKE_CASE__ )
        trainer.save_metrics("""eval""" , SCREAMING_SNAKE_CASE__ )
    # Write model card and (optionally) push to hub
    snake_case_ : Optional[Any] = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """masked-image-modeling""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-image-modeling"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ )
    else:
        trainer.create_model_card(**SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this module (the entry
    # point above is obfuscated as `SCREAMING_SNAKE_CASE__`) — verify before running.
    main()
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
    """
    Composite configuration for RAG: a question-encoder config, a generator config
    and the retriever settings (see the module-level docstring above).

    NOTE(review): the obfuscated original declared `__init__` with 27 parameters all
    named `lowercase__` (a SyntaxError) and three methods all named `__UpperCamelCase`.
    The names below are restored from the defaults visible in the signature.
    """

    model_type = """rag"""
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=3_00,
        retrieval_vector_size=7_68,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , is_encoder_decoder=is_encoder_decoder , prefix=prefix , vocab_size=vocab_size , **kwargs , )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("""question_encoder""" )
        question_encoder_model_type = question_encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""generator""" )
        decoder_model_type = decoder_config.pop("""model_type""" )

        # Imported lazily to avoid a circular import with the auto-config registry.
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type , **question_encoder_config )
        self.generator = AutoConfig.for_model(decoder_model_type , **decoder_config )

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            # Fall back to the generator's forced EOS token when not set explicitly.
            self.forced_eos_token_id = getattr(self.generator , """forced_eos_token_id""" , None )

    @classmethod
    def from_question_encoder_generator_configs(cls , question_encoder_config , generator_config , **kwargs ):
        """Alternate constructor from two already-built sub-configs."""
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **kwargs )

    def to_dict(self ):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__ )
        output["""question_encoder"""] = self.question_encoder.to_dict()
        output["""generator"""] = self.generator.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 48
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
# Module-level flag; not referenced by any of the visible code below.
a_ = False
class __lowercase ( unittest.TestCase):
    """Intentionally empty fast-test case for this pipeline (no fast tests implemented)."""
    pass
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase):
    """Slow, GPU-only integration test for VersatileDiffusionImageVariationPipeline."""
    def __UpperCamelCase (self ):
        # Loads the full pretrained pipeline, runs 50 inference steps on a sample
        # image with a fixed seed, and compares a 3x3 output slice to reference values.
        # NOTE(review): this block is obfuscated — results are assigned to `snake_case_`
        # while later lines read `pipe`/`image`/`image_slice`/`expected_slice`, and
        # `lowercase__` (presumably `torch_device` / the loaded image / False) is
        # undefined; it will not run as-is.
        snake_case_ : Dict = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
        pipe.to(lowercase__ )
        pipe.set_progress_bar_config(disable=lowercase__ )
        snake_case_ : Any = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        snake_case_ : Dict = torch.manual_seed(0 )
        snake_case_ : Union[str, Any] = pipe(
            image=lowercase__ , generator=lowercase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
        snake_case_ : str = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        snake_case_ : Any = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
        # Loose tolerance: GPU nondeterminism across kernels.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 48
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
# The config class below logs via `logger` (undefined in the obfuscated original);
# expose the conventional alias without removing the existing `a_` binding.
logger = a_
class __lowercase ( PretrainedConfig):
    """Configuration for the UPerNet semantic-segmentation architecture.

    NOTE(review): reconstructed. The original declared the parameter name ``lowercase__``
    eleven times (a SyntaxError) while the body read the real names, and inherited from the
    undefined ``_UpperCAmelCase``; names are restored from the body's attribute assignments
    and the base class from the module's ``PretrainedConfig`` import.
    """

    # Model-type identifier (original attribute name kept).
    _A : Optional[int] = """upernet"""

    def __init__(
        self,
        backbone_config=None,
        hidden_size=5_12,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=3_84,
        auxiliary_channels=2_56,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=2_55,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Resolve the backbone: default ResNet when omitted, rebuild from dict when given one.
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
            backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("""model_type""" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def __UpperCamelCase (self):
        """Serialize to a dict, expanding the nested backbone config.

        NOTE(review): the original returned an undefined ``output`` (NameError); the
        dict-key assignments are restored.
        """
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 48
| 1
|
"""simple docstring"""
def solution():
    """Project Euler 40: product of the digits d1*d10*d100*...*d1000000 of Champernowne's
    constant 0.123456789101112...

    Returns the product as an int (the known answer is 210).

    NOTE(review): the original measured ``len`` of the function object itself and appended
    ``str(<function>)`` (the obfuscation collapsed ``constant``/``i`` into the function
    name), raising TypeError; it is restored here, and the loop now stops as soon as
    1,000,001 digits are available instead of always generating one million numbers.
    """
    chunks = []
    total_len = 0
    i = 1
    # Accumulate decimal expansions until index 999_999 (the millionth digit) exists.
    while total_len <= 1_000_000:
        s = str(i)
        chunks.append(s)
        total_len += len(s)
        i += 1
    constant = "".join(chunks)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9_999])
        * int(constant[99_999])
        * int(constant[999_999])
    )


# Preserve the auto-generated name under which this def was originally bound.
SCREAMING_SNAKE_CASE__ = solution

if __name__ == "__main__":
    print(solution())
| 48
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
a_ = logging.getLogger(__name__)
class __lowercase ( _UpperCAmelCase):
    """simple docstring"""

    # NER task reader for CoNLL column format.
    # NOTE(review): an automated rewrite renamed every method to the same mangled name
    # ``__UpperCamelCase`` (so only the last definition survives on the class), duplicated
    # the parameter name ``lowercase__`` (a SyntaxError), and redirected assignments into
    # throwaway ``snake_case_`` locals while later lines still read the intended names
    # (``mode``, ``examples``, ``words``, ``labels`` ...). Transcribed as-is; comments
    # describe the apparent intent.

    def __init__(self , lowercase__=-1 ):
        # in NER datasets, the last column is usually reserved for NER label
        snake_case_ : Union[str, Any] = label_idx

    # Intended: read ``<data_dir>/<mode>.txt`` and build one InputExample per sentence.
    def __UpperCamelCase (self , lowercase__ , lowercase__ ):
        if isinstance(lowercase__ , lowercase__ ):
            snake_case_ : List[str] = mode.value
        snake_case_ : List[Any] = os.path.join(lowercase__ , f'{mode}.txt' )
        snake_case_ : Tuple = 1
        snake_case_ : Any = []
        with open(lowercase__ , encoding="""utf-8""" ) as f:
            snake_case_ : str = []
            snake_case_ : List[Any] = []
            for line in f:
                # Blank lines / "-DOCSTART-" markers terminate the current sentence.
                if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
                        guid_index += 1
                    snake_case_ : Optional[Any] = []
                    snake_case_ : int = []
                else:
                    snake_case_ : Optional[Any] = line.split(""" """ )
                    words.append(splits[0] )
                    if len(lowercase__ ) > 1:
                        labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("""O""" )
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
        return examples

    # Intended: write per-token predictions back out next to the test input, keeping the
    # document structure of the input file.
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
        snake_case_ : str = 0
        for line in test_input_reader:
            if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
                writer.write(lowercase__ )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                snake_case_ : Optional[int] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
                writer.write(lowercase__ )
            else:
                logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )

    # Intended: load the label set from a file, defaulting to the standard CoNLL-2003
    # NER tags; "O" is prepended when missing.
    def __UpperCamelCase (self , lowercase__ ):
        if path:
            with open(lowercase__ , """r""" ) as f:
                snake_case_ : Dict = f.read().splitlines()
            if "O" not in labels:
                snake_case_ : List[Any] = ["""O"""] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowercase ( _UpperCAmelCase):
    """simple docstring"""

    # Chunking variant of the task above: labels live in the second-to-last CoNLL column.
    def __init__(self ):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )

    # Intended: load chunk labels from a file, defaulting to the CoNLL-2000 chunk tag set.
    # NOTE(review): ``path``/``labels`` are read but the obfuscated code binds
    # ``lowercase__``/``snake_case_`` instead — NameError as written.
    def __UpperCamelCase (self , lowercase__ ):
        if path:
            with open(lowercase__ , """r""" ) as f:
                snake_case_ : Any = f.read().splitlines()
            if "O" not in labels:
                snake_case_ : Tuple = ["""O"""] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class __lowercase ( _UpperCAmelCase):
    """simple docstring"""

    # POS-tagging task over CoNLL-U files parsed with ``conllu.parse_incr``.
    # NOTE(review): same automated-rewrite damage as the classes above — every method is
    # named ``__UpperCamelCase`` (later defs shadow earlier ones), parameter names are
    # duplicated (SyntaxError), and ``snake_case_`` locals hide the names the code reads.

    # Intended: read one InputExample per CoNLL-U sentence ("form" = word, "upos" = label).
    def __UpperCamelCase (self , lowercase__ , lowercase__ ):
        if isinstance(lowercase__ , lowercase__ ):
            snake_case_ : List[Any] = mode.value
        snake_case_ : Optional[int] = os.path.join(lowercase__ , f'{mode}.txt' )
        snake_case_ : Tuple = 1
        snake_case_ : str = []
        with open(lowercase__ , encoding="""utf-8""" ) as f:
            for sentence in parse_incr(lowercase__ ):
                snake_case_ : Tuple = []
                snake_case_ : Any = []
                for token in sentence:
                    words.append(token["""form"""] )
                    labels.append(token["""upos"""] )
                # Every token must contribute exactly one word and one label.
                assert len(lowercase__ ) == len(lowercase__ )
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
                    guid_index += 1
        return examples

    # Intended: write "form (upos|prediction)" per token, one sentence per line.
    def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
        snake_case_ : Dict = 0
        for sentence in parse_incr(lowercase__ ):
            snake_case_ : int = preds_list[example_id]
            snake_case_ : Dict = """"""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(lowercase__ )
            example_id += 1

    # Intended: load labels from a file, defaulting to the 17 universal POS tags.
    def __UpperCamelCase (self , lowercase__ ):
        if path:
            with open(lowercase__ , """r""" ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 48
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__(a: int, b: int) -> int:
    """Russian-peasant (binary) multiplication: returns a * b for b >= 0.

    Adds ``a`` for every set bit of ``b`` while doubling ``a`` each step.

    NOTE(review): the original declared the parameter name ``SCREAMING_SNAKE_CASE__``
    twice (a SyntaxError) while the body read ``a``/``b``; the names are restored.
    """
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def SCREAMING_SNAKE_CASE__(a: int, b: int, c: int) -> int:
    """Russian-peasant multiplication under a modulus: returns (a * b) % c for b >= 0.

    Same doubling scheme as above, but the accumulator is reduced mod ``c`` at every add,
    so intermediate values stay bounded.

    NOTE(review): parameter names restored (the original repeated
    ``SCREAMING_SNAKE_CASE__`` three times — a SyntaxError).
    """
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
| 48
|
"""simple docstring"""
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random rounds.

    Returns True when ``num`` is (very probably) prime, False when composite.

    NOTE(review): restored the name ``rabin_miller`` used by the caller below. Guards for
    ``num`` < 5 and even numbers were added: the original looped forever on even composites
    (t == 0 makes the ``i == t - 1`` exit unreachable) and crashed on num <= 3 via
    ``randrange(2, num - 1)``. Behavior for odd inputs >= 5 is unchanged.
    """
    if num < 2:
        return False
    if num in (2, 3):
        return True
    if num % 2 == 0:
        return False
    # Write num - 1 as s * 2**t with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s //= 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            # Square repeatedly; a witness is found if we never hit num - 1.
            while v != (num - 1):
                if i == t - 1:
                    return False
                i += 1
                v = (v * v) % num
    return True


# Preserve the auto-generated name under which this def was originally bound.
SCREAMING_SNAKE_CASE__ = rabin_miller
def is_prime_low_num(num: int) -> bool:
    """Primality check: trial division by all primes below 1000, then Miller-Rabin.

    Returns True when ``num`` is prime (probabilistically for large inputs).

    NOTE(review): restored the name ``is_prime_low_num`` used by the callers below; as
    written the final line called ``rabin_miller``, which was never bound under that name.
    The 170-line literal list of primes below 1000 is replaced by an equivalent sieve of
    Eratosthenes (same primes 2..997, same behavior).
    """
    if num < 2:
        return False
    # Sieve of Eratosthenes for all primes < 1000.
    limit = 1000
    sieve = [True] * limit
    sieve[0] = sieve[1] = False
    for p in range(2, int(limit**0.5) + 1):
        if sieve[p]:
            for multiple in range(p * p, limit, p):
                sieve[multiple] = False
    low_primes = [p for p in range(limit) if sieve[p]]
    if num in low_primes:
        return True
    # Any small factor means composite; otherwise defer to Miller-Rabin.
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


# Preserve the auto-generated name under which this def was originally bound.
SCREAMING_SNAKE_CASE__ = is_prime_low_num
def generate_large_prime(keysize: int = 1_024) -> int:
    """Return a random prime with exactly ``keysize`` bits.

    Samples uniformly from [2**(keysize-1), 2**keysize) until a prime is found.

    NOTE(review): restored the name ``generate_large_prime`` used by the script guard
    below, and the parameter name ``keysize`` that the body reads (the original parameter
    was renamed to ``SCREAMING_SNAKE_CASE__``, making ``keysize`` a NameError).
    """
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


# Preserve the auto-generated name under which this def was originally bound.
SCREAMING_SNAKE_CASE__ = generate_large_prime
if __name__ == "__main__":
    # Demo: generate one large prime and re-check it.
    # NOTE(review): ``generate_large_prime``/``is_prime_low_num`` are not bound under
    # these names above (the defs were renamed to ``SCREAMING_SNAKE_CASE__`` by an
    # automated rewrite), and the result is assigned to ``a_`` while ``num`` is read —
    # NameError as written. The prints also emit tuples, not two arguments.
    a_ = generate_large_prime()
    print(('''Prime number:''', num))
    print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 48
| 1
|
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
    """simple docstring"""

    # Fast (tiny-model) test suite for StableUnCLIPPipeline.
    # NOTE(review): an automated rewrite renamed all five class attributes to ``_A`` (each
    # assignment overwrites the previous — upstream these are pipeline_class / params /
    # batch_params / image_params / image_latents_params plus a flag), duplicated the
    # parameter name ``lowercase__`` in get_dummy_inputs (a SyntaxError), and bound results
    # to throwaway ``snake_case_`` locals while later lines read the intended names
    # (``embedder_hidden_size``, ``embedder_projection_dim``, ``components`` ...).
    _A : int = StableUnCLIPPipeline
    _A : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
    _A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
    _A : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _A : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    _A : Optional[Any] = False

    # Intended: build a tiny prior + denoiser component set (seeded for determinism).
    def __UpperCamelCase (self ):
        snake_case_ : List[str] = 32
        snake_case_ : List[str] = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        snake_case_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        torch.manual_seed(0 )
        snake_case_ : Union[str, Any] = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=lowercase__ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        snake_case_ : Union[str, Any] = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase__ , num_layers=1 , )
        torch.manual_seed(0 )
        snake_case_ : List[Any] = DDPMScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=10_00 , clip_sample=lowercase__ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
        # regular denoising components
        torch.manual_seed(0 )
        snake_case_ : int = StableUnCLIPImageNormalizer(embedding_dim=lowercase__ )
        snake_case_ : List[Any] = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
        torch.manual_seed(0 )
        snake_case_ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        torch.manual_seed(0 )
        snake_case_ : Optional[int] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        snake_case_ : Union[str, Any] = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase__ , layers_per_block=1 , upcast_attention=lowercase__ , use_linear_projection=lowercase__ , )
        torch.manual_seed(0 )
        snake_case_ : str = DDIMScheduler(
            beta_schedule="""scaled_linear""" , beta_start=0.00085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=lowercase__ , steps_offset=1 , )
        torch.manual_seed(0 )
        snake_case_ : List[Any] = AutoencoderKL()
        snake_case_ : Union[str, Any] = {
            # prior components
            """prior_tokenizer""": prior_tokenizer,
            """prior_text_encoder""": prior_text_encoder,
            """prior""": prior,
            """prior_scheduler""": prior_scheduler,
            # image noising components
            """image_normalizer""": image_normalizer,
            """image_noising_scheduler""": image_noising_scheduler,
            # regular denoising components
            """tokenizer""": tokenizer,
            """text_encoder""": text_encoder,
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
        }
        return components

    # Intended: canonical pipeline kwargs with a device-appropriate seeded generator.
    def __UpperCamelCase (self , lowercase__ , lowercase__=0 ):
        if str(lowercase__ ).startswith("""mps""" ):
            snake_case_ : Optional[Any] = torch.manual_seed(lowercase__ )
        else:
            snake_case_ : List[Any] = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
        snake_case_ : Dict = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """prior_num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    # Attention slicing should only be compared strictly on CPU.
    def __UpperCamelCase (self ):
        snake_case_ : int = torch_device == """cpu"""
        self._test_attention_slicing_forward_pass(test_max_difference=lowercase__ )

    # Batched-vs-single equivalence; relaxed on CPU/MPS.
    def __UpperCamelCase (self ):
        snake_case_ : Any = torch_device in ["""cpu""", """mps"""]
        self._test_inference_batch_single_identical(test_max_difference=lowercase__ )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase):
    """simple docstring"""

    # GPU integration tests for StableUnCLIPPipeline (full checkpoint downloads).
    # NOTE(review): all three methods share the mangled name ``__UpperCamelCase`` (later
    # defs shadow earlier ones on the class), and results are bound to ``snake_case_``
    # locals while later lines read ``pipe``/``image``/``expected_image``/``mem_bytes``.
    def __UpperCamelCase (self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Intended: end-to-end generation compared against a stored reference image.
    def __UpperCamelCase (self ):
        snake_case_ : Optional[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
        snake_case_ : Dict = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
        pipe.to(lowercase__ )
        pipe.set_progress_bar_config(disable=lowercase__ )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        snake_case_ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
        snake_case_ : List[str] = pipe("""anime turle""" , generator=lowercase__ , output_type="""np""" )
        snake_case_ : Tuple = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(lowercase__ , lowercase__ )

    # Intended: peak-VRAM budget check with offloading enabled.
    def __UpperCamelCase (self ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        snake_case_ : List[Any] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
        snake_case_ : Tuple = pipe.to(lowercase__ )
        pipe.set_progress_bar_config(disable=lowercase__ )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        snake_case_ : Optional[int] = pipe(
            """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
        snake_case_ : Tuple = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 48
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __lowercase ( PretrainedConfig):
    """Configuration class for DeBERTa-v2 models (defaults match
    ``microsoft/deberta-v2-xlarge``).

    NOTE(review): reconstructed from the upstream ``DebertaV2Config``: the original
    declared the parameter name ``lowercase__`` nineteen times (a SyntaxError) while the
    body read the real names, and inherited the undefined ``_UpperCAmelCase``; the base
    class is restored to the imported ``PretrainedConfig``.
    """

    # Model-type identifier (original attribute name kept).
    _A : str = """deberta-v2"""

    def __init__(
        self,
        vocab_size=12_81_00,
        hidden_size=15_36,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=61_44,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility: older checkpoints store pos_att_type as e.g. "c2p|p2c".
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("""|""" )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        # pooler_hidden_size defaults to hidden_size unless explicitly overridden in kwargs.
        self.pooler_hidden_size = kwargs.get("""pooler_hidden_size""" , hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class __lowercase ( OnnxConfig):
    """ONNX export configuration for DeBERTa-v2.

    NOTE(review): reconstructed. The original defined all three members under the same
    mangled name ``__UpperCamelCase`` (so only the last survived on the class) and
    repeated the parameter name ``lowercase__`` ten times (a SyntaxError). Names are
    restored from the upstream ``DebertaV2OnnxConfig`` API and from the
    ``super().generate_dummy_inputs`` call the body already makes; the undefined base
    class ``_UpperCAmelCase`` is replaced by the imported ``OnnxConfig``.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis layout of the exported model's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        # token_type_ids is only an input when the config actually uses token types.
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
        else:
            return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset version required by this architecture."""
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        num_choices=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=40,
        image_height=40,
        tokenizer=None,
    ):
        """Build tracer inputs, dropping token_type_ids when the model does not use them."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 48
| 1
|
"""simple docstring"""
def euclidean_distance_sqr(pointa, pointb):
    """Squared Euclidean distance between two 2-D points given as (x, y) pairs.

    NOTE(review): restored the name ``euclidean_distance_sqr`` used by the callers below.
    The obfuscated version also collapsed both parameters into ``pointa`` (duplicate
    parameter name = SyntaxError, and the expression subtracted a point from itself,
    always yielding 0); the second point is restored.
    """
    return (pointa[0] - pointb[0]) ** 2 + (pointa[1] - pointb[1]) ** 2


# Preserve the auto-generated name under which this def was originally bound.
SCREAMING_SNAKE_CASE__ = euclidean_distance_sqr
def column_based_sort(array, column=0):
    """Return ``array`` (a sequence of points) sorted by the given coordinate index.

    NOTE(review): restored the names ``column_based_sort``/``array``/``column`` used by
    the callers below and inside the lambda (the obfuscated version repeated one
    parameter name — a SyntaxError — and referenced undefined ``x``/``column``).
    """
    return sorted(array, key=lambda point: point[column])


# Preserve the auto-generated name under which this def was originally bound.
SCREAMING_SNAKE_CASE__ = column_based_sort
def dis_between_closest_pair(points, points_counts, min_dis=float("""inf""" )):
    """Brute-force minimum squared distance among the first ``points_counts`` points.

    Used as the O(n^2) base case of the divide-and-conquer closest-pair algorithm.

    NOTE(review): parameter names restored from the body (the obfuscated signature
    repeated the same name three times — a SyntaxError).
    """
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


# Preserve the auto-generated name under which this def was originally bound.
SCREAMING_SNAKE_CASE__ = dis_between_closest_pair
def dis_between_closest_in_strip(points, points_counts, min_dis=float("""inf""" )):
    """Minimum squared distance within the vertical strip around the dividing line.

    Each point is only compared against its (up to) six strip neighbours, which keeps the
    merge step linear.

    NOTE(review): parameter names and the inner-loop bound ``i`` restored from the
    upstream algorithm (the obfuscated signature repeated one name — a SyntaxError — and
    both loop bounds pointed at the same mangled identifier).
    """
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


# Preserve the auto-generated name under which this def was originally bound.
SCREAMING_SNAKE_CASE__ = dis_between_closest_in_strip
def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer closest pair: returns the minimum *squared* distance.

    NOTE(review): restored the name ``closest_pair_of_points_sqr`` (the body already
    recurses under that name) and the parameter names read throughout the body; the
    obfuscated signature repeated one name three times — a SyntaxError.
    """
    # Base case: brute force for tiny inputs.
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)
    # Collect points close enough to the dividing line to straddle the two halves.
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


# Preserve the auto-generated name under which this def was originally bound.
SCREAMING_SNAKE_CASE__ = closest_pair_of_points_sqr
def closest_pair_of_points(points, points_counts):
    """Euclidean distance between the closest pair of 2-D points.

    Pre-sorts by x and by y, then delegates to the squared-distance recursion and takes
    the square root at the end.

    NOTE(review): restored the name ``closest_pair_of_points`` called by the script guard
    below, plus the parameter names the body reads (duplicate-parameter SyntaxError in
    the obfuscated signature).
    """
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


# Preserve the auto-generated name under which this def was originally bound.
SCREAMING_SNAKE_CASE__ = closest_pair_of_points
if __name__ == "__main__":
    # Demo run over six sample points.
    # NOTE(review): the point list is assigned to ``a_`` while ``points`` is read, and
    # ``closest_pair_of_points`` is not bound under that name above (the def was renamed
    # by an automated rewrite) — NameError as written.
    a_ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print('''Distance:''', closest_pair_of_points(points, len(points)))
| 48
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__(numa: int, numb: int) -> bool:
    """Return True iff the two integers have opposite signs.

    Uses the XOR sign-bit trick: x ^ y is negative exactly when the sign bits differ
    (zero counts as non-negative).

    NOTE(review): the original declared the parameter name twice (``numa ^ numa`` — a
    SyntaxError, and a self-XOR that would always be 0); a distinct second parameter is
    restored.
    """
    return numa ^ numb < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 48
| 1
|
"""simple docstring"""
def solution():
    """Project Euler 48: last ten digits of 1**1 + 2**2 + ... + 1000**1000, as a string.

    NOTE(review): restored — the original accumulated into ``total`` via a throwaway
    ``snake_case_`` binding and returned ``str(<function>)[-10:]`` (the obfuscation
    collapsed ``total`` into the function's own name). The known answer is "9110846700".
    """
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]


# Preserve the auto-generated name under which this def was originally bound.
SCREAMING_SNAKE_CASE__ = solution

if __name__ == "__main__":
    print(solution())
| 48
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Test fixtures and module constants.
# NOTE(review): an automated rewrite renamed every constant to ``a_`` (upstream:
# SAMPLE_VOCAB, mock_tokenizer_config, zh_code, ORG_NAME, FRAMEWORK), so each assignment
# overwrites the previous one and later references such as ``ORG_NAME`` raise NameError.
a_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
a_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
a_ = '''>>zh<<'''
a_ = '''Helsinki-NLP/'''
# Pick the tensor framework for return_tensors= based on what is installed.
if is_torch_available():
    a_ = '''pt'''
elif is_tf_available():
    a_ = '''tf'''
else:
    a_ = '''jax'''
@require_sentencepiece
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
    """simple docstring"""

    # MarianTokenizer test suite.
    # NOTE(review): same automated-rewrite damage as elsewhere in this file — all test
    # methods share the mangled name ``__UpperCamelCase`` (later defs shadow earlier ones),
    # results are bound to ``snake_case_`` locals while later lines read the intended names
    # (``tokenizer``, ``batch``, ``vocab_keys`` ...), and ``lowercase__``/``ORG_NAME`` are
    # referenced without being defined. Transcribed as-is.
    _A : str = MarianTokenizer
    _A : List[str] = False
    _A : List[str] = True

    # Intended: write a tiny vocab + spm fixture pair into tmpdirname.
    def __UpperCamelCase (self ):
        super().setUp()
        snake_case_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        snake_case_ : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
        snake_case_ : Any = Path(self.tmpdirname )
        save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        snake_case_ : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def __UpperCamelCase (self , **lowercase__ ):
        return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )

    # Input/output pair used by the common tokenizer tests.
    def __UpperCamelCase (self , lowercase__ ):
        return (
            "This is a test",
            "This is a test",
        )

    # Intended: "</s>" maps to id 0 and back.
    def __UpperCamelCase (self ):
        snake_case_ : Union[str, Any] = """</s>"""
        snake_case_ : Tuple = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )

    # Intended: vocabulary content and ordering of the fixture vocab.
    def __UpperCamelCase (self ):
        snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(lowercase__ ) , 9 )

    def __UpperCamelCase (self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )

    # Intended: round-trip save/load of the real en-de checkpoint.
    def __UpperCamelCase (self ):
        snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
        snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0]
        self.assertListEqual(lowercase__ , batch.input_ids[0] )
        snake_case_ : Tuple = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(lowercase__ )
        snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )]
        self.assertIn("""source.spm""" , lowercase__ )
        MarianTokenizer.from_pretrained(lowercase__ )

    # Intended: truncation to the 512-token model max length.
    def __UpperCamelCase (self ):
        snake_case_ : Union[str, Any] = self.get_tokenizer()
        snake_case_ : List[str] = tok(
            ["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(batch.input_ids.shape , (2, 5_12) )

    # Intended: padding without truncation for short inputs.
    def __UpperCamelCase (self ):
        snake_case_ : Tuple = self.get_tokenizer()
        snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )

    # Intended: full integration check against a pinned revision of opus-mt-en-de.
    @slow
    def __UpperCamelCase (self ):
        # fmt: off
        snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 
5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )

    # Intended: separate source/target vocab round-trip for a two-vocab checkpoint.
    def __UpperCamelCase (self ):
        snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        snake_case_ : Dict = """Tämä on testi"""
        snake_case_ : List[Any] = """This is a test"""
        snake_case_ : Optional[int] = [76, 7, 20_47, 2]
        snake_case_ : List[str] = [69, 12, 11, 9_40, 2]
        snake_case_ : Any = tokenizer(lowercase__ ).input_ids
        self.assertListEqual(lowercase__ , lowercase__ )
        snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids
        self.assertListEqual(lowercase__ , lowercase__ )
        snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
        self.assertEqual(lowercase__ , lowercase__ )
| 48
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.